Dataset schema (29 columns; ranges are min to max over the split, ⌀ marks a nullable column):

| column | type | range / distinct values |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (⌀) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (⌀) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (⌀) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 to 1 |
| author_id | string | length 1 to 132 |

Sample rows follow below, pipe-separated in the original column order, with each row's `content` field inlined.
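A minimal sketch of how such a dump is typically consumed with the `datasets` library (the dataset id and split are placeholders; this page does not name them):

```python
from datasets import load_dataset

# "org/code-dataset" is a placeholder -- substitute the actual dataset id.
ds = load_dataset("org/code-dataset", split="train", streaming=True)

# Stream one row and inspect a few of the columns described above.
for row in ds.take(1):
    print(row["repo_name"], row["path"], row["license_type"])
    print(row["content"][:200])  # first 200 characters of the file
```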
51069274b991d4ae2de188fbec89a5d455c35b29 | c5cf46942decbda1c813474edb5a18b9595a8bf1 | /core/constants/default_values.py | a9b45a44b702b7574351ac83c4948842275b9980 | [
"MIT"
] | permissive | AsiganTheSunk/python3-gnosis-cli | 360b4da9fd86a642ec1e399bdb6c39d1dbcbb8ce | c4c2638aa75b8a8268ad899d6cea1e602227ef19 | refs/heads/master | 2023-03-18T17:38:38.594171 | 2019-11-15T00:01:34 | 2019-11-15T00:01:34 | 221,243,913 | 0 | 0 | MIT | 2023-03-03T23:35:41 | 2019-11-12T14:59:00 | JavaScript | UTF-8 | Python | false | false | 230 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# String sizes of the different types of addresses
CONTRACT_ADDRESS_LENGTH = 42
TX_ADDRESS_LENGTH = 66
# String Size of API Keys
INFURA_API_KEY_LENGTH = 32
ETHERSCAN_API_KEY_LENGTH = 34
| [
"[email protected]"
] | |
87fbf91d172aff23f865bdbf312d2f0c9889d385 | 344f52fe0c84ec3e6f6b655e7cc2309441506650 | /SlaverServer/run_slaver.py | 7586a801df09507a9552f6c7a9ab1c695d3ef21b | [
"MIT"
] | permissive | Hanlen520/BigBoss | f1c0b1b5a30e45149a1d0eefa914a3060640d5db | 8a3cfeace1eb2b852de940a6ba6b4a9d6bc814db | refs/heads/master | 2020-04-04T09:49:44.300429 | 2018-09-29T08:21:31 | 2018-09-29T08:21:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | from tornado.web import Application
from tornado.ioloop import IOLoop
import argparse
from config import *
from router import SLAVER_ROUTER
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, help='set port')
    args = parser.parse_args()
    port = getattr(args, 'port') or GlobalConf.SLAVER_PORT

    application = Application(SLAVER_ROUTER, **GlobalConf.SLAVER_SETTING)
    application.listen(port, address='0.0.0.0')
    IOLoop.instance().start()
| [
"[email protected]"
] | |
806a9c288ed6c350c6ad1a8a5e89c7587915ab49 | 6d7507b0695c5f704f1367604370f52a1cd60fe6 | /testfarm/test_program/app/honor/teacher/user_center/mine_collection/test_cases/test003_label_manage.py | 1534c47e571eaeaf83a4808cf18597aa790f1721 | [] | no_license | sj542484/test | f88b1f0524e853b24759de1bc8019a643bf11dcc | 908bef52867e3944b76898cfcc018fa403202815 | refs/heads/master | 2022-04-09T17:18:40.847936 | 2020-03-25T07:30:55 | 2020-03-25T07:30:55 | 194,576,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,187 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Author : SUN FEIFEI
import unittest
import time
from conf.decorator import setup, teardown, testcase, teststeps
from app.honor.teacher.home.vanclass.object_page.home_page import ThomePage
from app.honor.teacher.login.object_page.login_page import TloginPage
from app.honor.teacher.test_bank.object_page.filter_page import FilterPage
from app.honor.teacher.test_bank.object_page.test_bank_page import TestBankPage
from app.honor.teacher.user_center.mine_collection.object_page.mine_collect_page import CollectionPage
from app.honor.teacher.user_center.mine_collection.test_data.add_label import label_data
from app.honor.teacher.user_center.user_information.object_page.user_center_page import TuserCenterPage
from utils.get_attribute import GetAttribute
from utils.toast_find import Toast
class Collection(unittest.TestCase):
    """My Collection -- label management"""

    @classmethod
    @setup
    def setUp(cls):
        """Launch the app."""
        cls.login = TloginPage()
        cls.home = ThomePage()
        cls.user = TuserCenterPage()
        cls.filter = FilterPage()
        cls.collect = CollectionPage()
        cls.question = TestBankPage()
        cls.get = GetAttribute()

    @classmethod
    @teardown
    def tearDown(cls):
        pass

    @testcase
    def test_collection_label_manage(self):
        self.login.app_status()  # check the app's current state
        if self.home.wait_check_page():  # page checkpoint
            self.home.click_tab_profile()  # on the home page, tap the 'Profile' tab
            if self.user.wait_check_page():  # page checkpoint
                self.user.click_mine_collection()  # tap 'My Collection'
                if self.collect.wait_check_page():  # page checkpoint
                    content = self.create_label_operation()  # create labels
                    modify = self.modify_label_operation(content)  # rename a label
                    remove = self.remove_label_operation()  # remove a label
                    self.judge_operation(remove, modify)  # verify the remove/rename results
                    if self.collect.wait_check_manage_page():
                        self.home.back_up_button()  # back to the collection page
                    else:
                        print('Did not reach the My Collection page')
                if self.collect.wait_check_page():
                    self.home.back_up_button()  # back to the profile page
                else:
                    print('Did not reach the profile page')
            if self.user.wait_check_page():  # page checkpoint
                self.home.click_tab_hw()  # back to the home page
        else:
            Toast().get_toast()  # fetch the toast
            print("Did not reach the main screen")

    @teststeps
    def create_label_operation(self):
        """Create labels: detailed steps."""
        self.collect.more_button()
        if self.collect.wait_check_label_manage_page():
            self.collect.label_manage_button()  # label-management button
        content = []  # labels already present
        if self.collect.wait_check_manage_page():
            self.label_list(content)  # list the existing labels
            print('--------------- create labels ----------------')
            k = 0
            for j in range(len(label_data)):
                if self.collect.wait_check_manage_list_page():
                    if label_data[j]['label'] not in content:
                        self.collect.add_label()  # create a label
                        if self.home.wait_check_tips_page():
                            self.home.tips_title()
                            item = self.home.input()
                            item.send_keys(label_data[j]['label'])
                            print('Label:', item.text)
                            if self.get.enabled(self.home.commit_button()):
                                self.home.commit_button().click()  # tap the OK button
                            if self.collect.wait_check_manage_list_page():
                                label = self.collect.label_title()  # existing labels
                                if len(label) != len(content) + 1:
                                    time.sleep(1)
                                    label = self.collect.label_title()  # existing labels
                                if label[-1].text != label_data[j]['label']:
                                    print('★★★ Error- creating the label failed')
                                else:
                                    k += 1
                                    print('label created successfully')
                                    print('--------------------')
                if k == 2:
                    break
        return content

    @teststeps
    def label_list(self, content):
        """List the labels that already exist."""
        if self.collect.wait_check_manage_list_page():
            print('Existing labels:')
            label = self.collect.label_title()  # existing labels
            for i in range(len(label)):
                content.append(label[i].text)
                print(label[i].text)
        elif self.home.wait_check_empty_tips_page():
            print('No labels yet')

    @teststeps
    def modify_label_operation(self, content):
        """Rename a label."""
        if self.collect.wait_check_manage_list_page():
            print('------------ remove/rename a label -------------')
            label = self.collect.label_title()  # existing labels
            print('Label:', label[-2].text)
            self.collect.open_menu(label[-2])  # long-press the label item
            self.collect.menu_item(1)  # rename this label
            if self.home.wait_check_input_page():
                name = self.home.input()
                name.send_keys(r'' + content[-1])
                print('Renamed to:', name.text)
                self.home.commit_button().click()  # OK button
                Toast().toast_operation("自定义标签名重复")  # expected toast: duplicate custom label name
                print('--------------------')
            if self.collect.wait_check_manage_page():
                self.collect.open_menu(label[-2])  # long-press the label item
                self.collect.menu_item(1)  # rename this label
                if self.home.wait_check_input_page():
                    item = 0
                    name = self.home.input()
                    for i in range(len(label)):
                        if label_data[i]['label'] not in content:
                            name.send_keys(r'' + label_data[i]['label'])
                            print('Renamed to:', name.text)
                            item = label_data[i]['label']
                            self.home.commit_button().click()  # OK button
                            if self.collect.wait_check_manage_page():
                                print('--------------------')
                            break
        return item

    @teststeps
    def remove_label_operation(self):
        """Remove a label."""
        if self.collect.wait_check_manage_page():
            if self.collect.wait_check_manage_list_page():
                label = self.collect.label_title()  # existing labels
                var = label[-1].text
                self.collect.open_menu(label[-1])  # long-press the label item
                self.collect.menu_item(0)  # remove this label
                if Toast().find_toast("删除自定义标签成功"):  # toast text: custom label deleted successfully
                    print('Label %s removed successfully' % var)
                    print('--------------------')
                else:
                    print('★★★ Error- toast "删除自定义标签成功" (custom label deleted) was not shown')
        return var

    @teststeps
    def judge_operation(self, remove, modify):
        """Verify the results of removing/renaming labels."""
        if self.collect.wait_check_manage_page():
            print('---------- verify the remove/rename results ----------')
            if self.collect.wait_check_manage_list_page():
                item = self.collect.label_title()  # existing labels
                for i in range(len(item)):
                    print(item[i].text)
                print('--------------------')
                if item[-1].text in remove:
                    print('★★★ Error- removing the label failed', item[-1].text)
                if item[-1].text != modify:
                    print('★★★ Error- renaming the label failed', item[-1].text)
                else:  # restore the test data
                    self.collect.open_menu(item[-1])  # long-press the label item
                    self.collect.menu_item(0)  # remove this label
                    if Toast().find_toast("删除自定义标签成功"):  # toast text: custom label deleted successfully
                        print('restoring test data')
                    else:
                        print('★★★ Error- toast "删除自定义标签成功" (custom label deleted) was not shown')
            elif self.home.wait_check_empty_tips_page():
                print('★★★ Error- no data')
| [
"[email protected]"
] | |
1356618b0fc6adcc984e34e5f8a37827491754d1 | 8cf32a75a8e9a2a2e15b8c6a23694c4213d9a9ef | /horizon/openstack_dashboard/api/rest/keystone.py | d92a0d01bda64299460f03a9e88ef8811dc25590 | [
"Apache-2.0"
] | permissive | swethapts/devstack-controller | 6ae4f179c6dfe9e98aab2869416ed7fdcf4cda9c | ad405679f3b5d4825a3bc4a269fad3ce4c77ef7e | refs/heads/master | 2021-05-16T02:36:44.301793 | 2015-01-30T06:31:51 | 2015-01-30T06:31:51 | 30,010,810 | 0 | 1 | null | 2020-07-24T00:51:05 | 2015-01-29T08:25:40 | Python | UTF-8 | Python | false | false | 17,824 | py | # Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API over the keystone service.
"""
import django.http
from django.views import generic
from openstack_dashboard import api
from openstack_dashboard.api.rest import utils as rest_utils
from openstack_dashboard.api.rest import urls
@urls.register
class Users(generic.View):
    """API for keystone users.
    """
    url_regex = r'keystone/users/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of users.

        By default, a listing of all users for the current domain are
        returned. You may specify GET parameters for project_id, group_id and
        domain_id to change that listing's context.

        The listing result is an object with property "items".
        """
        domain_context = request.session.get('domain_context')
        result = api.keystone.user_list(
            request,
            project=request.GET.get('project_id'),
            domain=request.GET.get('domain_id', domain_context),
            group=request.GET.get('group_id')
        )
        return {'items': [u.to_dict() for u in result]}

    @rest_utils.ajax(method='POST')
    def post(self, request):
        """Perform some action on the collection of users.

        The POST data should be an application/json object with two
        parameters: "action" and "data".

        action = "delete"
          This action deletes multiple users in one call, using the list of
          ids (strings) passed in as data.

          This action returns HTTP 204 (no content) on success.

        action = "create"
          This action creates a user using the parameters supplied in
          "data". The base parameters are name (string), email (string,
          optional), password (string, optional), project_id (string,
          optional), enabled (boolean, defaults to true). The user will be
          created in the default domain.

          This action returns the new user object on success.
        """
        action = request.DATA['action']
        data = request.DATA['data']

        if action == 'delete':
            for user_id in data:
                if user_id != request.user.id:
                    api.keystone.user_delete(request, user_id)
        elif action == 'create':
            # not sure why email is forced to None, but other code does it
            domain = api.keystone.get_default_domain(request)
            new_user = api.keystone.user_create(
                request,
                name=data['name'],
                email=data.get('email') or None,
                password=data.get('password'),
                project=data.get('project_id'),
                enabled=True,
                domain=domain.id
            )
            return rest_utils.CreatedResponse(
                '/api/keystone/users/%s' % new_user.id,
                new_user.to_dict()
            )
        else:
            raise rest_utils.AjaxError(400, 'invalid action')

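# Illustrative request body for the "create" action above (an assumed example,
# not taken from the module itself):
#   {"action": "create",
#    "data": {"name": "demo-user", "email": "demo@example.com",
#             "password": "secret", "project_id": "abc123"}}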
@urls.register
class User(generic.View):
    """API for a single keystone user.
    """
    url_regex = r'keystone/users/(?P<id>[0-9a-f]+|current)$'

    @rest_utils.ajax()
    def get(self, request, id):
        """Get a specific user by id.

        If the id supplied is 'current' then the current logged-in user
        will be returned, otherwise the user specified by the id.
        """
        if id == 'current':
            id = request.user.id
        return api.keystone.user_get(request, id).to_dict()

    @rest_utils.ajax()
    def delete(self, request, id):
        """Delete a single user by id.

        This method returns HTTP 204 (no content) on success.
        """
        if id == 'current':
            raise django.http.HttpResponseNotFound('current')
        api.keystone.user_delete(request, id)

    @rest_utils.ajax(method='PUT')
    def put(self, request, id):
        """Update a single user.

        The PUT data should be an application/json object with attributes to
        set to new values: password (string), project_id (string),
        enabled (boolean). A PUT may contain any one of those attributes, but
        if it contains more than one it must contain the project_id, even
        if it is not being altered.

        This method returns HTTP 204 (no content) on success.
        """
        keys = tuple(request.DATA)
        if keys == ('password', ):
            api.keystone.user_update_password(request, id, **request.DATA)
        elif keys == ('enabled', ):
            api.keystone.user_update_enabled(request, id, **request.DATA)
        elif keys == ('project_id', ):
            api.keystone.user_update_tenant(request, id,
                                            project=request.DATA['project_id'])
        else:
            # update multiple things, and hope the caller has supplied
            # everything
            request.DATA['project'] = request.DATA.pop('project_id', None)
            request.DATA.setdefault('password', None)
            api.keystone.user_update(request, id, **request.DATA)

@urls.register
class Roles(generic.View):
    """API over all roles.
    """
    url_regex = r'keystone/roles/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of roles.

        By default a listing of all roles are returned.

        If the GET parameters project_id and user_id are specified then that
        user's roles for that project are returned. If user_id is 'current'
        then the current user's roles for that project are returned.

        The listing result is an object with property "items".
        """
        project_id = request.GET.get('project_id')
        user_id = request.GET.get('user_id')
        if project_id and user_id:
            if user_id == 'current':
                user_id = request.user.id
            roles = api.keystone.roles_for_user(request, user_id,
                                                project_id) or []
            items = [r.to_dict() for r in roles]
        else:
            items = [r.to_dict() for r in api.keystone.role_list(request)]
        return {'items': items}

    @rest_utils.ajax(method='POST')
    def post(self, request):
        """Perform some action on the collection of roles.

        The POST data should be an application/json object with two
        parameters: "action" and "data".

        action = "delete"
          This action deletes multiple roles in one call, using the list of
          ids (strings) passed in as data.

          This method returns HTTP 204 (no content) on success.

        action = "create"
          This action creates a role using the "name" (string) parameter
          supplied in the "data" object.

          This method returns the new role object on success.

        action = "grant"
          This action adds a role to a user using the parameters
          "user_id" (string), "project_id" (string) and "role_id" (string).

          This method returns HTTP 204 (no content) on success.
        """
        action = request.DATA['action']
        data = request.DATA['data']
        if action == 'delete':
            for role_id in data:
                api.keystone.role_delete(request, role_id)
        elif action == 'create':
            new_role = api.keystone.role_create(request, data['name'])
            return rest_utils.CreatedResponse(
                '/api/keystone/roles/%s' % new_role.id,
                new_role.to_dict()
            )
        elif action == 'grant':
            api.keystone.add_tenant_user_role(
                request,
                data['project_id'],
                data['user_id'],
                data['role_id']
            )
        else:
            raise rest_utils.AjaxError(400, 'invalid (unrecognised) action')

@urls.register
class Role(generic.View):
    """API for a single role.
    """
    url_regex = r'keystone/roles/(?P<id>[0-9a-f]+|default)$'

    @rest_utils.ajax()
    def get(self, request, id):
        """Get a specific role by id.

        If the id supplied is 'default' then the default role will be
        returned, otherwise the role specified by the id.
        """
        if id == 'default':
            return api.keystone.get_default_role(request).to_dict()
        return api.keystone.role_get(request, id).to_dict()

    @rest_utils.ajax()
    def delete(self, request, id):
        """Delete a single role by id.

        This method returns HTTP 204 (no content) on success.
        """
        if id == 'default':
            raise django.http.HttpResponseNotFound('default')
        api.keystone.role_delete(request, id)

    @rest_utils.ajax(method='PUT')
    def put(self, request, id):
        """Update a single role.

        The PUT data should be an application/json object with the "name"
        attribute to update.

        This method returns HTTP 204 (no content) on success.
        """
        api.keystone.role_update(request, id, request.DATA['name'])

@urls.register
class Domains(generic.View):
    """API over all domains.
    """
    url_regex = r'keystone/domains/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of domains.

        A listing of all domains are returned.

        The listing result is an object with property "items".
        """
        items = [d.to_dict() for d in api.keystone.domain_list(request)]
        return {'items': items}

    @rest_utils.ajax(method='POST')
    def post(self, request):
        """Perform some action on the collection of domains.

        The POST data should be an application/json object with two
        parameters: "action" and "data".

        action = "delete"
          This action deletes multiple domains in one call, using the list of
          ids (strings) passed in as data.

          This method returns HTTP 204 (no content) on success.

        action = "create"
          This action creates a domain using parameters supplied in the
          "data" object. The "name" (string) parameter is required, others
          are optional: "description" (string) and "enabled" (boolean,
          defaults to true).

          This method returns the new domain object on success.
        """
        action = request.DATA['action']
        data = request.DATA['data']
        if action == 'delete':
            for domain_id in data:
                api.keystone.domain_delete(request, domain_id)
        elif action == 'create':
            new_domain = api.keystone.domain_create(
                request,
                data['name'],
                description=data.get('description'),
                enabled=data.get('enabled', True),
            )
            return rest_utils.CreatedResponse(
                '/api/keystone/domains/%s' % new_domain.id,
                new_domain.to_dict()
            )
        else:
            raise rest_utils.AjaxError(400, 'invalid action')

@urls.register
class Domain(generic.View):
    """API over a single domain.
    """
    url_regex = r'keystone/domains/(?P<id>[0-9a-f]+|default)$'

    @rest_utils.ajax()
    def get(self, request, id):
        """Get a specific domain by id.

        If the id supplied is 'default' then the default domain will be
        returned, otherwise the domain specified by the id.
        """
        if id == 'default':
            return api.keystone.get_default_domain(request).to_dict()
        return api.keystone.domain_get(request, id).to_dict()

    @rest_utils.ajax()
    def delete(self, request, id):
        """Delete a single domain by id.

        This method returns HTTP 204 (no content) on success.
        """
        if id == 'default':
            raise django.http.HttpResponseNotFound('default')
        api.keystone.domain_delete(request, id)

    @rest_utils.ajax()
    def put(self, request, id):
        """Update a single domain.

        The PUT data should be an application/json object with the attributes
        to set to new values: "name" (string), "description" (string) and
        "enabled" (boolean).

        This method returns HTTP 204 (no content) on success.
        """
        api.keystone.domain_update(
            request,
            id,
            description=request.DATA.get('description'),
            enabled=request.DATA.get('enabled'),
            name=request.DATA.get('name')
        )

def _tenant_kwargs_from_DATA(data, enabled=True):
    # tenant_create takes arbitrary keyword arguments with only a small
    # restriction (the default args)
    kwargs = {'name': None, 'description': None, 'enabled': enabled,
              'domain': data.pop('domain_id', None)}
    kwargs.update(data)
    return kwargs

@urls.register
class Projects(generic.View):
    """API over all projects.

    Note that in the following "project" is used exclusively where in the
    underlying keystone API the terms "project" and "tenant" are used
    interchangeably.
    """
    url_regex = r'keystone/projects/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of projects.

        By default a listing of all projects for the current domain are
        returned.

        You may specify GET parameters for project_id (string), user_id
        (string) and admin (boolean) to change that listing's context.
        Additionally, paginate (boolean) and marker may be used to get
        paginated listings.

        The listing result is an object with properties:

        items
          The list of project objects.
        has_more
          Boolean indicating there are more results when pagination is used.
        """
        result, has_more = api.keystone.tenant_list(
            request,
            paginate=request.GET.get('paginate', False),
            marker=request.GET.get('marker'),
            domain=request.GET.get('domain_id'),
            user=request.GET.get('user_id'),
            admin=request.GET.get('admin', True)
        )
        # return (list of results, has_more_data)
        return dict(has_more=has_more, items=[d.to_dict() for d in result])

    @rest_utils.ajax(method='POST')
    def post(self, request):
        """Perform some action on the collection of projects (tenants).

        The POST data should be an application/json object with two
        parameters: "action" and "data".

        action = "delete"
          This action deletes multiple projects in one call, using the list
          of ids (strings) passed in as data.

          This method returns HTTP 204 (no content) on success.

        action = "create"
          This action creates a project using parameters supplied in the
          "data" object. The "name" (string) parameter is required, others
          are optional: "description" (string), "domain_id" (string) and
          "enabled" (boolean, defaults to true). Additional, undefined
          parameters may also be provided, but you'll have to look deep into
          keystone to figure out what they might be.

          This method returns the new project object on success.
        """
        action = request.DATA['action']
        data = request.DATA['data']
        if action == 'delete':
            for id in data:
                api.keystone.tenant_delete(request, id)
        elif action == 'create':
            kwargs = _tenant_kwargs_from_DATA(data)
            if not kwargs['name']:
                raise rest_utils.AjaxError(400, '"name" is required')
            new_project = api.keystone.tenant_create(
                request,
                kwargs.pop('name'),
                **kwargs
            )
            return rest_utils.CreatedResponse(
                '/api/keystone/projects/%s' % new_project.id,
                new_project.to_dict()
            )
        else:
            raise rest_utils.AjaxError(400, 'invalid action')

@urls.register
class Project(generic.View):
    """API over a single project.

    Note that in the following "project" is used exclusively where in the
    underlying keystone API the terms "project" and "tenant" are used
    interchangeably.
    """
    url_regex = r'keystone/projects/(?P<id>[0-9a-f]+)$'

    @rest_utils.ajax()
    def get(self, request, id):
        """Get a specific project by id.
        """
        return api.keystone.tenant_get(request, id).to_dict()

    @rest_utils.ajax()
    def delete(self, request, id):
        """Delete a single project by id.

        This method returns HTTP 204 (no content) on success.
        """
        api.keystone.tenant_delete(request, id)

    @rest_utils.ajax()
    def put(self, request, id):
        """Update a single project.

        The PUT data should be an application/json object with the attributes
        to set to new values: "name" (string), "description" (string),
        "domain_id" (string) and "enabled" (boolean). Additional, undefined
        parameters may also be provided, but you'll have to look deep into
        keystone to figure out what they might be.

        This method returns HTTP 204 (no content) on success.
        """
        kwargs = _tenant_kwargs_from_DATA(request.DATA, enabled=None)
        api.keystone.tenant_update(request, id, **kwargs)
| [
"[email protected]"
] | |
764b44f56eb85a9c7ffe5cee4b564152e1b3aa7f | 85bf9a13bf62c1f074894d134c23dd992ae8688c | /javasolutions/p63/Solution.py | 0a7c244f231e3a5f619d33c503578c2873d98f7e | [] | no_license | pololee/oj-leetcode | 4cca3d309b2c9931d15d3cec4b07b5d9d22733ef | 78a8b27ee108ba93aa7b659665976112f48fc2c2 | refs/heads/master | 2020-06-21T02:15:26.882273 | 2020-02-06T04:56:21 | 2020-02-06T04:56:21 | 197,320,113 | 0 | 0 | null | 2020-02-06T04:56:23 | 2019-07-17T05:20:02 | Python | UTF-8 | Python | false | false | 921 | py | class Solution:
    def uniquePathsWithObstacles(self, obstacleGrid):
        """
        :type obstacleGrid: List[List[int]]
        :rtype: int
        """
        if not obstacleGrid or len(obstacleGrid) == 0 or len(obstacleGrid[0]) == 0:
            return 0
        if obstacleGrid[0][0] == 1:
            return 0
        m = len(obstacleGrid)
        n = len(obstacleGrid[0])
        DP = [[0 for _ in range(n)]
              for _ in range(m)]
        DP[0][0] = 1
        for i in range(1, m):
            DP[i][0] = 0 if obstacleGrid[i][0] == 1 else DP[i-1][0]
        for j in range(1, n):
            DP[0][j] = 0 if obstacleGrid[0][j] == 1 else DP[0][j-1]
        for i in range(1, m):
            for j in range(1, n):
                if obstacleGrid[i][j] == 1:
                    DP[i][j] = 0
                else:
                    DP[i][j] = DP[i-1][j] + DP[i][j-1]
        return DP[m-1][n-1]
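# Hedged usage note (not part of the original file): for the classic grid
# [[0, 0, 0], [0, 1, 0], [0, 0, 0]] with one central obstacle,
# Solution().uniquePathsWithObstacles(...) returns 2, the two monotone
# paths that go around the obstacle.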
| [
"[email protected]"
] | |
27cbb16e31516b3cb28c074ac4404179beda786c | deafd775f238b2836f77b9140f4d6e14a3f3c06d | /python/ABC/ABC082/082_B.py | 2d02b85f31176d2942aec83f88cd72ff00a2d545 | [] | no_license | knakajima3027/Atcoder | ab8e2bf912173b7523fddbb11b38abd7e296762e | 64cb32fcc4b99501f2f84496e5535e1e7b14c467 | refs/heads/master | 2021-06-22T03:58:03.777001 | 2020-12-19T11:23:49 | 2020-12-19T11:23:49 | 135,173,223 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | s = input()
t = input()
S = []
T = []
for i in range(len(s)):
    S.append(s[i])
for i in range(len(t)):
    T.append(t[i])
S.sort()
T.sort()
T.reverse()
if S == T:
    print('No')
elif min(S, T) == S:
    print('Yes')
else:
    print('No')
| [
"[email protected]"
] | |
26d9325e402bf6c5143407c4ebc339e006ab969f | 97aa1181a8305fab0cfc635954c92880460ba189 | /torch/distributions/lowrank_multivariate_normal.py | e6d6879c8d3a1f24c54f31ee0c8ffb8609b78208 | [
"BSD-2-Clause"
] | permissive | zhujiang73/pytorch_mingw | 64973a4ef29cc10b96e5d3f8d294ad2a721ccacb | b0134a0acc937f875b7c4b5f3cef6529711ad336 | refs/heads/master | 2022-11-05T12:10:59.045925 | 2020-08-22T12:10:32 | 2020-08-22T12:10:32 | 123,688,924 | 8 | 4 | NOASSERTION | 2022-10-17T12:30:52 | 2018-03-03T12:15:16 | C++ | UTF-8 | Python | false | false | 9,834 | py | import math
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.multivariate_normal import _batch_mahalanobis, _batch_mv
from torch.distributions.utils import _standard_normal, lazy_property
def _batch_capacitance_tril(W, D):
    r"""
    Computes Cholesky of :math:`I + W.T @ inv(D) @ W` for a batch of matrices :math:`W`
    and a batch of vectors :math:`D`.
    """
    m = W.size(-1)
    Wt_Dinv = W.transpose(-1, -2) / D.unsqueeze(-2)
    K = torch.matmul(Wt_Dinv, W).contiguous()
    K.view(-1, m * m)[:, ::m + 1] += 1  # add identity matrix to K
    return torch.cholesky(K)

def _batch_lowrank_logdet(W, D, capacitance_tril):
    r"""
    Uses "matrix determinant lemma"::

        log|W @ W.T + D| = log|C| + log|D|,

    where :math:`C` is the capacitance matrix :math:`I + W.T @ inv(D) @ W`, to compute
    the log determinant.
    """
    return 2 * capacitance_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + D.log().sum(-1)
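# Hedged aside (not in the original source): the determinant lemma above can
# be sanity-checked numerically, e.g.
#   W = torch.randn(5, 2); D = torch.rand(5) + 0.1
#   lhs = torch.logdet(W @ W.t() + torch.diag(D))
#   rhs = _batch_lowrank_logdet(W, D, _batch_capacitance_tril(W, D))
#   assert torch.allclose(lhs, rhs, atol=1e-5)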
def _batch_lowrank_mahalanobis(W, D, x, capacitance_tril):
    r"""
    Uses "Woodbury matrix identity"::

        inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D),

    where :math:`C` is the capacitance matrix :math:`I + W.T @ inv(D) @ W`, to compute the squared
    Mahalanobis distance :math:`x.T @ inv(W @ W.T + D) @ x`.
    """
    Wt_Dinv = W.transpose(-1, -2) / D.unsqueeze(-2)
    Wt_Dinv_x = _batch_mv(Wt_Dinv, x)
    mahalanobis_term1 = (x.pow(2) / D).sum(-1)
    mahalanobis_term2 = _batch_mahalanobis(capacitance_tril, Wt_Dinv_x)
    return mahalanobis_term1 - mahalanobis_term2

class LowRankMultivariateNormal(Distribution):
    r"""
    Creates a multivariate normal distribution with covariance matrix having a low-rank form
    parameterized by :attr:`cov_factor` and :attr:`cov_diag`::

        covariance_matrix = cov_factor @ cov_factor.T + cov_diag

    Example:

        >>> m = LowRankMultivariateNormal(torch.zeros(2), torch.tensor([[1.], [0.]]), torch.ones(2))
        >>> m.sample()  # normally distributed with mean=`[0,0]`, cov_factor=`[[1],[0]]`, cov_diag=`[1,1]`
        tensor([-0.2102, -0.5429])

    Args:
        loc (Tensor): mean of the distribution with shape `batch_shape + event_shape`
        cov_factor (Tensor): factor part of low-rank form of covariance matrix with shape
            `batch_shape + event_shape + (rank,)`
        cov_diag (Tensor): diagonal part of low-rank form of covariance matrix with shape
            `batch_shape + event_shape`

    Note:
        The computation for determinant and inverse of covariance matrix is avoided when
        `cov_factor.shape[1] << cov_factor.shape[0]` thanks to `Woodbury matrix identity
        <https://en.wikipedia.org/wiki/Woodbury_matrix_identity>`_ and
        `matrix determinant lemma <https://en.wikipedia.org/wiki/Matrix_determinant_lemma>`_.
        Thanks to these formulas, we just need to compute the determinant and inverse of
        the small size "capacitance" matrix::

            capacitance = I + cov_factor.T @ inv(cov_diag) @ cov_factor
    """
    arg_constraints = {"loc": constraints.real,
                       "cov_factor": constraints.real,
                       "cov_diag": constraints.positive}
    support = constraints.real
    has_rsample = True

    def __init__(self, loc, cov_factor, cov_diag, validate_args=None):
        if loc.dim() < 1:
            raise ValueError("loc must be at least one-dimensional.")
        event_shape = loc.shape[-1:]
        if cov_factor.dim() < 2:
            raise ValueError("cov_factor must be at least two-dimensional, "
                             "with optional leading batch dimensions")
        if cov_factor.shape[-2:-1] != event_shape:
            raise ValueError("cov_factor must be a batch of matrices with shape {} x m"
                             .format(event_shape[0]))
        if cov_diag.shape[-1:] != event_shape:
            raise ValueError("cov_diag must be a batch of vectors with shape {}".format(event_shape))

        loc_ = loc.unsqueeze(-1)
        cov_diag_ = cov_diag.unsqueeze(-1)
        try:
            loc_, self.cov_factor, cov_diag_ = torch.broadcast_tensors(loc_, cov_factor, cov_diag_)
        except RuntimeError:
            raise ValueError("Incompatible batch shapes: loc {}, cov_factor {}, cov_diag {}"
                             .format(loc.shape, cov_factor.shape, cov_diag.shape))
        self.loc = loc_[..., 0]
        self.cov_diag = cov_diag_[..., 0]
        batch_shape = self.loc.shape[:-1]

        self._unbroadcasted_cov_factor = cov_factor
        self._unbroadcasted_cov_diag = cov_diag
        self._capacitance_tril = _batch_capacitance_tril(cov_factor, cov_diag)
        super(LowRankMultivariateNormal, self).__init__(batch_shape, event_shape,
                                                        validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(LowRankMultivariateNormal, _instance)
        batch_shape = torch.Size(batch_shape)
        loc_shape = batch_shape + self.event_shape
        new.loc = self.loc.expand(loc_shape)
        new.cov_diag = self.cov_diag.expand(loc_shape)
        new.cov_factor = self.cov_factor.expand(loc_shape + self.cov_factor.shape[-1:])
        new._unbroadcasted_cov_factor = self._unbroadcasted_cov_factor
        new._unbroadcasted_cov_diag = self._unbroadcasted_cov_diag
        new._capacitance_tril = self._capacitance_tril
        super(LowRankMultivariateNormal, new).__init__(batch_shape,
                                                       self.event_shape,
                                                       validate_args=False)
        new._validate_args = self._validate_args
        return new

    @property
    def mean(self):
        return self.loc

    @lazy_property
    def variance(self):
        return (self._unbroadcasted_cov_factor.pow(2).sum(-1)
                + self._unbroadcasted_cov_diag).expand(self._batch_shape + self._event_shape)

    @lazy_property
    def scale_tril(self):
        # The following identity is used to increase the numerically computation stability
        # for Cholesky decomposition (see http://www.gaussianprocess.org/gpml/, Section 3.4.3):
        #     W @ W.T + D = D1/2 @ (I + D-1/2 @ W @ W.T @ D-1/2) @ D1/2
        # The matrix "I + D-1/2 @ W @ W.T @ D-1/2" has eigenvalues bounded from below by 1,
        # hence it is well-conditioned and safe to take Cholesky decomposition.
        n = self._event_shape[0]
        cov_diag_sqrt_unsqueeze = self._unbroadcasted_cov_diag.sqrt().unsqueeze(-1)
        Dinvsqrt_W = self._unbroadcasted_cov_factor / cov_diag_sqrt_unsqueeze
        K = torch.matmul(Dinvsqrt_W, Dinvsqrt_W.transpose(-1, -2)).contiguous()
        K.view(-1, n * n)[:, ::n + 1] += 1  # add identity matrix to K
        scale_tril = cov_diag_sqrt_unsqueeze * torch.cholesky(K)
        return scale_tril.expand(self._batch_shape + self._event_shape + self._event_shape)

    @lazy_property
    def covariance_matrix(self):
        covariance_matrix = (torch.matmul(self._unbroadcasted_cov_factor,
                                          self._unbroadcasted_cov_factor.transpose(-1, -2))
                             + torch.diag_embed(self._unbroadcasted_cov_diag))
        return covariance_matrix.expand(self._batch_shape + self._event_shape +
                                        self._event_shape)

    @lazy_property
    def precision_matrix(self):
        # We use "Woodbury matrix identity" to take advantage of low rank form::
        #     inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D)
        # where :math:`C` is the capacitance matrix.
        Wt_Dinv = (self._unbroadcasted_cov_factor.transpose(-1, -2)
                   / self._unbroadcasted_cov_diag.unsqueeze(-2))
        A = torch.triangular_solve(Wt_Dinv, self._capacitance_tril, upper=False)[0]
        precision_matrix = (torch.diag_embed(self._unbroadcasted_cov_diag.reciprocal())
                            - torch.matmul(A.transpose(-1, -2), A))
        return precision_matrix.expand(self._batch_shape + self._event_shape +
                                       self._event_shape)

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        W_shape = shape[:-1] + self.cov_factor.shape[-1:]
        eps_W = _standard_normal(W_shape, dtype=self.loc.dtype, device=self.loc.device)
        eps_D = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
        return (self.loc + _batch_mv(self._unbroadcasted_cov_factor, eps_W)
                + self._unbroadcasted_cov_diag.sqrt() * eps_D)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        diff = value - self.loc
        M = _batch_lowrank_mahalanobis(self._unbroadcasted_cov_factor,
                                       self._unbroadcasted_cov_diag,
                                       diff,
                                       self._capacitance_tril)
        log_det = _batch_lowrank_logdet(self._unbroadcasted_cov_factor,
                                        self._unbroadcasted_cov_diag,
                                        self._capacitance_tril)
        return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + log_det + M)

    def entropy(self):
        log_det = _batch_lowrank_logdet(self._unbroadcasted_cov_factor,
                                        self._unbroadcasted_cov_diag,
                                        self._capacitance_tril)
        H = 0.5 * (self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + log_det)
        if len(self._batch_shape) == 0:
            return H
        else:
            return H.expand(self._batch_shape)
| [
"[email protected]"
] | |
29819db9f9c77633b810c3ba2978e48537b4e7a3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_209/544.py | 22e6c7236e14da498cdcf8d941798513af589657 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | # input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
import math
def get_surface(p):
    return math.pi * p[0]**2

def get_side_surface(p):
    return 2 * math.pi * p[0] * p[1]

def get_area(p):
    if p is None:
        return 0
    return get_surface(p) + get_side_surface(p)

t = int(input())  # read a line with a single integer
for i in range(1, t + 1):
    str_number = input().split(" ")
    n, k = int(str_number[0]), int(str_number[1])
    pancakes = []
    for j in range(n):
        str_number = input().split(" ")
        pancakes.append((int(str_number[0]), int(str_number[1])))
    pancakes_hsorted = sorted(pancakes, key=lambda x: get_side_surface(x), reverse=True)
    sol = sorted(pancakes_hsorted[:(k-1)], reverse=True)
    for p in sol:
        pancakes.remove(p)
    additional_area = 0
    if len(sol) > 0:
        for p in pancakes:
            if p[0] > sol[0][0]:
                additional_area = max(additional_area, get_side_surface(p) + (get_surface(p) - get_surface(sol[0])))
            else:
                additional_area = max(additional_area, get_side_surface(p))
    else:
        sol = [sorted(pancakes, key=lambda x: get_area(x), reverse=True)[0]]
    area = additional_area
    for j in range(len(sol)):
        area += get_area(sol[j])
        if j+1 < len(sol):
            area -= get_surface(sol[j+1])
    print("Case #{}: {:.9f}".format(i, area))
| [
"[email protected]"
] | |
c5a1300c883da1921c805a18c66e4dce27d6c66e | c51eef37bb983a9c35635c7ccc96a0cf689a7438 | /lecture/lecture_gn5/week1/03_parameter.py | 21215f034b5e08cd1b40992831d3dda1790f6621 | [] | no_license | Kyeongrok/python_crawler | 0a717b43be36584af1b0f7c1ad0c79108a5d11e0 | 5a5da8af7bb080f752a9a066741ac8adab136a3a | refs/heads/master | 2022-09-13T03:15:08.053639 | 2022-08-02T15:45:03 | 2022-08-02T15:45:03 | 124,719,435 | 40 | 34 | null | 2019-02-27T08:29:52 | 2018-03-11T03:20:32 | HTML | UTF-8 | Python | false | false | 477 | py | # parameter 파라메터 매개변수
# A variable is a value that changes; it is written without quotation marks.
# A constant (literal) is always the same value; it is written with quotation marks.
def printMessage(message):
    print(message)

printMessage("happy spring")
# Differences between the two versions:
# 1. whether anything appears in the parentheses next to the function name
# 2. whether anything appears in the parentheses when calling the function
# 3. what print() receives: the literal "hello" vs. the variable message
# ctrl + tab: go to the previous file
# ctrl + tab + tab: go two files back
# ctrl + e
# command + e | [
"[email protected]"
] | |
aeac86300ebab4cf0eff0327474f80f2a6ed7843 | 3f06e7ae747e935f7a2d1e1bae27a764c36a77d1 | /day11.py | f6ef0bfc5790ca1418db1e53ada53842434704b4 | [] | no_license | mn113/adventofcode2016 | 94465f36c46e9aa21d879d82e043e1db8c55c9da | 3a93b23519acbfe326b8bf7c056f1747bbea036a | refs/heads/master | 2022-12-11T22:57:21.937221 | 2022-12-04T16:37:24 | 2022-12-04T16:37:24 | 75,545,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | #! /usr/bin/env python
# Transport all items to floor 4, avoiding dangerous situations
# (Missionaries/cannibals problem)
#4:
#3:
#2: lp
#1: E trc LPTRC
trips = 0
elev = 1
state = { # tuple represents: (chip's floor, generator's floor)
    'V': (2,1),
    'W': (2,1),
    'X': (1,1),
    'Y': (1,1),
    'Z': (1,1)
}
goal = {key: (4,4) for key in 'VWXYZ'}
computedStates = {0: state}
newStates = []
def isValid(state):
    for k,v in state.items():
        # Stay between 1 and 4 please:
        if v[0] > 4 or v[0] < 1 or v[1] > 4 or v[1] < 1:
            print v, "out of bounds"
            return False
        if v[0] != v[1]:
            # Chip and its generator are apart
            for k2,v2 in state.items():
                if v2[1] == v[0]:  # index 1 is the generator's floor
                    # Another generator will infect this chip
                    print "infection", state
                    return False
    print state, "ok"
    return True
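# Hedged sketch (not the author's code): the notes below call for counting
# moves through the state space; a breadth-first search skeleton, assuming a
# hypothetical neighbours() generator for the legal elevator moves, could be:
#   from collections import deque
#   def bfs(start):
#       seen = set([repr(start)])
#       queue = deque([(start, 0)])
#       while queue:
#           current, moves = queue.popleft()
#           if current == goal:
#               return moves
#           for nxt in neighbours(current):  # neighbours() is hypothetical
#               if isValid(nxt) and repr(nxt) not in seen:
#                   seen.add(repr(nxt))
#                   queue.append((nxt, moves + 1))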
# NEED TO STORE THEM ON EACH OTHER AND COUNT MOVES
# binary search
# 28 = too low
# 53 = too high
# 40 = too low
# 47 = correct
# part 2
# 61 = too low
# 71 = correct
| [
"[email protected]"
] | |
f538641ad09d7696971988e32aa9d00b57082efc | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/draft_schematic/furniture/shared_furniture_bookcase_modern.py | f02d735683dbae730eb4a049b73eda113bf64955 | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 464 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    result = Intangible()
    result.template = "object/draft_schematic/furniture/shared_furniture_bookcase_modern.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
| [
"[email protected]"
] | |
bd56c0c31c38bfee93331f533c406ffe5b138e9f | 4ba18540bfd8c523fe39bbe7d6c8fa29d4ec0947 | /atlas/foundations_rest_api/src/test/filters/parsers/test_number_parser.py | f1fe582e071ade8fc93e52edbe6d1953879e05b3 | [
"BSD-3-Clause",
"MIT",
"CC0-1.0",
"Apache-2.0",
"BSD-2-Clause",
"MPL-2.0"
] | permissive | yottabytt/atlas | c9d8ef45a0921c9f46d3ed94d42342f11488a85e | b040e574fbc64c833039b003f8a90345dd98e0eb | refs/heads/master | 2022-10-14T11:12:12.311137 | 2020-06-13T13:19:35 | 2020-06-13T13:19:35 | 272,008,756 | 0 | 0 | Apache-2.0 | 2020-06-13T12:55:29 | 2020-06-13T12:55:28 | null | UTF-8 | Python | false | false | 1,438 | py | import unittest
from foundations_rest_api.filters.parsers import NumberParser
class TestNumberParser(unittest.TestCase):

    def setUp(self):
        self._parser = NumberParser()

    def test_random_value(self):
        value = 'attack'
        self.assertRaises(ValueError, self._parser.parse, value)

    def test_good_string_value(self):
        value = '3.14'
        parsed_value = self._parser.parse(value)
        expected_result = 3.14
        self.assertEqual(expected_result, parsed_value)

    def test_good_float_value(self):
        value = 9.8
        parsed_value = self._parser.parse(value)
        expected_result = 9.8
        self.assertEqual(expected_result, parsed_value)

    def test_good_int_value(self):
        value = 5
        parsed_value = self._parser.parse(value)
        expected_result = 5
        self.assertEqual(expected_result, parsed_value)

    def test_good_string_int_value(self):
        value = '10'
        parsed_value = self._parser.parse(value)
        expected_result = 10
        self.assertEqual(expected_result, parsed_value)

    def test_bad_none_value(self):
        value = None
        with self.assertRaises(ValueError) as cm:
            self._parser.parse(value)
        self.assertEqual(str(cm.exception), 'Not able to convert "None" to a number')

    def test_bad_null_value(self):
        value = 'null'
        self.assertRaises(ValueError, self._parser.parse, value)
| [
"[email protected]"
] | |
e072b567a6d98e5d00916d792ef4296106d3c78f | 83552e8512ec76739310b2227c57a76b2080eccc | /plotObjects/fittingFunctions.py | f2bd51ae879114fe986fb46e4f0491881adcf19b | [] | no_license | mlink1990/experimentEagle | c4617e0ec1080ca04ec62b39c92f0c977ec28b09 | 90394c2d9c2ac68165647bcc8c57c09099c37cf6 | refs/heads/master | 2022-11-07T09:05:49.134877 | 2020-06-21T13:47:44 | 2020-06-21T13:47:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,180 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 05 09:55:44 2016
@author: User
"""
import scipy
# here is the class definition for the fitting function
class FittingFunction(object):
    """Wrapper for a Python fitting function. Adds an optional guess function,
    a verbal description and a pretty equation string."""

    def __init__(self, fittingFunction, guessFunction=None, desc=None, prettyEquationString=None):
        self.fittingFunction = fittingFunction
        self.guessFunction = guessFunction
        self.desc = desc
        self.prettyEquationString = prettyEquationString


#######
# DEFINE FITTING FUNCTIONS AND GUESS FUNCTIONS
#######
# standard fitting functions
##################################################
def linear(x, m, c):
    return m*x + c

def linear_guess(xs, ys):
    return {"m": (ys.max() - ys.min()) / (xs.max() - xs.min()), "c": scipy.average(ys)}

##################################################
def quadratic(x, a, b, c):
    return a*x**2 + b*x + c

##################################################
def gaussian(x, A, sigma, x0, B):
    return A*scipy.exp(-(x - x0)**2. / (2.*sigma**2.)) + B

def gaussian_guess(xs, ys):
    A = ys.max() - ys.min()
    B = ys.min()
    #x0 = (xs.max()+xs.min())/2.0
    maximumIndex = scipy.argmax(ys)  # index of the data maximum
    x0 = xs[maximumIndex]
    sigma = (xs.max() - x0) / 2.0
    return {"A": A, "B": B, "x0": x0, "sigma": sigma}

##################################################
def lorentzian(x, x0, gamma, A, B):
    return A*gamma**2 / ((x - x0)**2 + gamma**2) + B

def lorentzian_guess(xs, ys):
    A = ys.max() - ys.min()
    B = ys.min()
    #x0 = (xs.max()+xs.min())/2.0
    maximumIndex = scipy.argmax(ys)  # index of the data maximum
    x0 = xs[maximumIndex]
    sigma = (xs.max() - x0) / 2.0
    return {"A": A, "B": B, "x0": x0, "sigma": sigma}

##################################################
def parabola(x, x0, a, B):
    return a*(x - x0)**2 + B

def parabola_guess(xs, ys):
    return {"a": 1, "B": scipy.average(ys), "x0": (xs.max() + xs.min()) / 2.0}

##################################################
def exponentialDecay(x, A, tau, B):
    return A*scipy.exp(-x/tau) + B

def exponentialDecay_guess(xs, ys):
    A = ys.max() - ys.min()
    B = ys.min()
    tau = (xs.max() + xs.min()) / 2.0
    return {"A": A, "B": B, "tau": tau}

##################################################
def sineWave(x, f, phi, A, B):
    return A*scipy.sin(2*scipy.pi*f*x + phi) + B

def sineWave_guess(xs, ys):
    A = ys.max() - ys.min()
    B = ys.min()
    phi = 0.001
    f = 1.0 / (xs.max() - xs.min())
    return {"A": A, "B": B, "phi": phi, "f": f}

##################################################
def sineWaveDecay1(x, f, phi, A, B, tau):
    return A*scipy.exp(-x/tau)*scipy.sin(2*scipy.pi*f*x + phi) + B

def sineWaveDecay1_guess(xs, ys):
    A = ys.max() - ys.min()
    B = ys.min()
    phi = 0.001
    f = 1.0 / (xs.max() - xs.min())
    tau = (xs.max() + xs.min()) / 2.0
    return {"A": A, "B": B, "phi": phi, "f": f, "tau": tau}

##################################################
def sineWaveDecay2(x, f, phi, A, B, tau):
    return A*scipy.exp(-x/tau)*scipy.sin(2*scipy.pi*((f**2 - (1/(2*scipy.pi*tau))**2)**0.5)*x + phi) + B

def sineWaveDecay2_guess(xs, ys):
    A = ys.max() - ys.min()
    B = ys.min()
    phi = 0.001
    f = 1.0 / (xs.max() - xs.min())
    tau = (xs.max() + xs.min()) / 2.0
    return {"A": A, "B": B, "phi": phi, "f": f, "tau": tau}

##################################################
def sincSquared(x, A, B, tau, x0):
    return A*(scipy.sinc(tau * 2*scipy.pi * (x - x0)))**2 / (2*scipy.pi) + B

##################################################
def sineSquared(x, f, phi, A, B):
    return A*scipy.sin(2*scipy.pi*f*x + phi)**2 + B

def sineSquared_guess(xs, ys):
    A = ys.max() - ys.min()
    B = ys.min()
    phi = 0.001
    f = 1.0 / (xs.max() - xs.min())
    return {"A": A, "B": B, "phi": phi, "f": f}

##################################################
def sineSquaredDecay(x, f, phi, A, B, tau):
    return A*scipy.exp(-x/tau)*scipy.sin(2*scipy.pi*f*(x + phi)/2.0)**2 + B

def sineSquaredDecay_guess(xs, ys):
    A = ys.max() - ys.min()
    B = ys.min()
    phi = 0.001
    f = 1.0 / (xs.max() - xs.min())
    tau = (xs.max() + xs.min()) / 2.0
    return {"A": A, "B": B, "phi": phi, "f": f, "tau": tau}

##################################################
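# Hedged usage sketch (not part of the original module; scipy.optimize is an
# assumed consumer): a FittingFunction bundles a model with its guess helper,
# so a fit might look like
#   import scipy.optimize
#   ff = FittingFunction(gaussian, gaussian_guess, desc="Gaussian fit")
#   guess = ff.guessFunction(xs, ys)
#   p0 = [guess[name] for name in ("A", "sigma", "x0", "B")]
#   popt, pcov = scipy.optimize.curve_fit(ff.fittingFunction, xs, ys, p0=p0)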
| [
"[email protected]"
] | |
9f6a4b3cea3a4768b3ed119ba3461165975946c9 | 15e4ea46e2b1944add82746c4b3369184550af1b | /9 Strings/Excersises/21.py | e82a496856e05d43914ebf743eed1cf243c8a9bb | [] | no_license | eduardogomezvidela/Summer-Intro | 53204a61b05066d8b8bc1ef234e83e15f823934d | 649a85b71a7e76eade3665554b03ca65108c648b | refs/heads/master | 2021-04-29T13:34:26.873513 | 2018-02-16T13:35:48 | 2018-02-16T13:35:48 | 121,754,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | #Caesar's Cipher
message = "Hello World"  #uryyb jbeyq
encrypted_message = ''
for char in message:
    o_char = char
    char = ord(char)
    if char + 13 <= 122 and char + 13 > 104:  # lowercase letters that stay within 'a'-'z' (<= keeps 'm' -> 'z' correct)
        char = char + 13
        encrypted_message = encrypted_message + chr(char)
    elif char + 13 > 122:  # lowercase letters that wrap around past 'z'
        char = 96 + char + 13 - 122
        encrypted_message = encrypted_message + chr(char)
    elif char + 13 <= 90 and char + 13 > 64:  # uppercase letters that stay within 'A'-'Z' (<= keeps 'M' -> 'Z' correct)
        char = char + 13
        encrypted_message = encrypted_message + chr(char)
    elif char + 13 > 90:  # uppercase letters that wrap around past 'Z'
        char = 64 + char + 13 - 90
        encrypted_message = encrypted_message + chr(char)
    else:
        encrypted_message = encrypted_message + o_char  # spaces and punctuation pass through

print(encrypted_message)
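# Hedged aside (not part of the original exercise): Python's standard library
# implements the same ROT13 transform, which makes a handy cross-check,
# assuming Python 3:
#   import codecs
#   codecs.encode("Hello World", "rot_13")  # -> "Uryyb Jbeyq"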
| [
"[email protected]"
] | |
b5870f9d11d9067baf5434f119b2c389ae719b51 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_asia.py | 1cdcd711f60b6da65ba29fce4d6bf8d63cc4eef0 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py |
#calss header
class _ASIA():
def __init__(self,):
self.name = "ASIA"
self.definitions = [u'the continent that is to the east of Europe, the west of the Pacific Ocean, and the north of the Indian Ocean']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
fdfb29f8d07af2d86a4bb2d860beafb88397900a | bcabce262e54a6ac38948a4717254cdc3ce65874 | /mealpy/swarm_based/SpaSA.py | 7b315ab4cfa31caba129621a65b40fc05eff5414 | [
"MIT"
] | permissive | ibrahim85/MEta-heuristics-ALgorithms-in-PYthon | 4ab6e6ef54127b6f4721178a1f855d1be91f9b42 | 47fb428e8378fc52cd5fe6eff20cec1c68ba5039 | refs/heads/master | 2023-06-03T05:23:31.993100 | 2021-06-28T14:48:38 | 2021-06-28T14:48:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,531 | py | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 17:22, 29/05/2020 %
# %
# Email: [email protected] %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy.random import uniform, normal
from numpy import abs, exp, ones, sign, matmul
from numpy.linalg import inv
from copy import deepcopy
from mealpy.root import Root
class BaseSpaSA(Root):
    """
    My version of: Sparrow Search Algorithm (SpaSA)
        (A novel swarm intelligence optimization approach: sparrow search algorithm)
    Link:
        https://doi.org/10.1080/21642583.2019.1708830
    Notes:
        + First, I sort the population and find g-best and g-worst
        + In Eq. 4, instead of using A+ and L, I used normal()
        + The paper's algorithm 1 flow is missing important components such as g_best_position and fitness updates
        + After changing some equations and the flow --> this becomes the BEST algorithm
    """

    def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100,
                 ST=0.8, PD=0.2, SD=0.1, **kwargs):
        super().__init__(obj_func, lb, ub, verbose, kwargs)
        self.epoch = epoch
        self.pop_size = pop_size
        self.ST = ST    # ST in [0.5, 1.0], safety threshold value
        self.PD = PD    # number of producers
        self.SD = SD    # number of sparrows who perceive the danger

    def train(self):
        pop = [self.create_solution() for _ in range(self.pop_size)]
        pop, g_best = self.get_sorted_pop_and_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)
        n1 = int(self.PD * self.pop_size)
        n2 = int(self.SD * self.pop_size)

        for epoch in range(self.epoch):
            r2 = uniform()  # R2 in [0, 1], the alarm value, random value

            # Using equation (3) update the sparrow's location;
            for i in range(0, n1):
                if r2 < self.ST:
                    x_new = pop[i][self.ID_POS] * exp((i+1) / ((uniform() + self.EPSILON) * self.epoch))
                else:
                    x_new = pop[i][self.ID_POS] + normal() * ones(self.problem_size)
                x_new = self.amend_position_random_faster(x_new)
                fit = self.get_fitness_position(x_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [x_new, fit]

            x_p = deepcopy(sorted(pop[:n1], key=lambda item: item[self.ID_FIT])[0][self.ID_POS])
            worst = deepcopy(sorted(pop, key=lambda item: item[self.ID_FIT])[-1])

            # Using equation (4) update the sparrow's location;
            for i in range(n1, self.pop_size):
                if i > int(self.pop_size / 2):
                    x_new = normal() * exp((worst[self.ID_POS] - pop[i][self.ID_POS]) / (i + 1) ** 2)
                else:
                    x_new = x_p + abs(pop[i][self.ID_POS] - x_p) * normal()
                x_new = self.amend_position_random_faster(x_new)
                fit = self.get_fitness_position(x_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [x_new, fit]

            # Using equation (5) update the sparrow's location;
            for i in range(0, n2):
                if pop[i][self.ID_FIT] > g_best[self.ID_FIT]:
                    x_new = g_best[self.ID_POS] + normal() * abs(pop[i][self.ID_POS] - g_best[self.ID_POS])
                else:
                    x_new = pop[i][self.ID_POS] + uniform(-1, 1) * \
                            (abs(pop[i][self.ID_POS] - worst[self.ID_POS]) / (pop[i][self.ID_FIT] - worst[self.ID_FIT] + self.EPSILON))
                x_new = self.amend_position_random_faster(x_new)
                fit = self.get_fitness_position(x_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [x_new, fit]

            pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train

class OriginalSpaSA(Root):
    """
    The original version of: Sparrow Search Algorithm
        (A novel swarm intelligence optimization approach: sparrow search algorithm)
    Link:
        https://doi.org/10.1080/21642583.2019.1708830
    Note:
        + Very weak algorithm
    """

    def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100,
                 ST=0.8, PD=0.2, SD=0.1, **kwargs):
        super().__init__(obj_func, lb, ub, verbose, kwargs)
        self.epoch = epoch
        self.pop_size = pop_size
        self.ST = ST    # ST in [0.5, 1.0], safety threshold value
        self.PD = PD    # number of producers
        self.SD = SD    # number of sparrows who perceive the danger

    def train(self):
        pop = [self.create_solution() for _ in range(self.pop_size)]
        pop, g_best = self.get_sorted_pop_and_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)
        g_best, g_worst = self.get_global_best_global_worst_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
        n1 = int(self.PD * self.pop_size)
        n2 = int(self.SD * self.pop_size)

        for epoch in range(self.epoch):
            r2 = uniform()  # R2 in [0, 1], the alarm value, random value

            # Using equation (3) update the sparrow's location;
            for i in range(0, n1):
                if r2 < self.ST:
                    x_new = pop[i][self.ID_POS] * exp((i + 1) / (uniform() * self.epoch))
                else:
                    x_new = pop[i][self.ID_POS] + normal() * ones(self.problem_size)
                x_new = self.amend_position_random_faster(x_new)
                fit = self.get_fitness_position(x_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [x_new, fit]

            # Using equation (4) update the sparrow's location;
            for i in range(n1, self.pop_size):
                if i > int(self.pop_size / 2):
                    x_new = normal() * exp((g_worst[self.ID_POS] - pop[i][self.ID_POS]) / (i + 1) ** 2)
                else:
                    L = ones((1, self.problem_size))
                    A = sign(uniform(-1, 1, (1, self.problem_size)))
                    A1 = A.T * inv(matmul(A, A.T)) * L
                    x_new = g_best[self.ID_POS] + matmul(abs(pop[i][self.ID_POS] - g_best[self.ID_POS]), A1)
                x_new = self.amend_position_random_faster(x_new)
                fit = self.get_fitness_position(x_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [x_new, fit]

            # Using equation (5) update the sparrow's location;
            for i in range(0, n2):
                if pop[i][self.ID_FIT] > g_best[self.ID_FIT]:
                    x_new = g_best[self.ID_POS] + normal() * abs(pop[i][self.ID_POS] - g_best[self.ID_POS])
                else:
                    x_new = pop[i][self.ID_POS] + uniform(-1, 1) * \
                            (abs(pop[i][self.ID_POS] - g_worst[self.ID_POS]) / (pop[i][self.ID_FIT] - g_worst[self.ID_FIT] + self.EPSILON))
                x_new = self.amend_position_random_faster(x_new)
                fit = self.get_fitness_position(x_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [x_new, fit]

            pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
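# Hedged usage sketch (illustrative only; the objective function and the
# parameter values are assumptions, not part of the original module):
#   def sphere(solution):
#       return sum(x ** 2 for x in solution)
#
#   md = BaseSpaSA(obj_func=sphere, lb=[-10] * 5, ub=[10] * 5,
#                  verbose=False, epoch=100, pop_size=50)
#   best_position, best_fitness, loss_history = md.train()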
| [
"[email protected]"
] | |
5e1ef1d6823ccae17da98b6f30009066aabdf8fc | 01abb5fe2d6a51e8ee4330eaead043f4f9aad99d | /Repo_Files/Zips/plugin.video.streamhub/resources/lib/ssources/dizigold.py | 2cd5ccfebc47753666aeae3c3f62630f4fe00744 | [] | no_license | MrAnhell/StreamHub | 01bb97bd3ae385205f3c1ac6c0c883d70dd20b9f | e70f384abf23c83001152eae87c6897f2d3aef99 | refs/heads/master | 2021-01-18T23:25:48.119585 | 2017-09-06T12:39:41 | 2017-09-06T12:39:41 | 87,110,979 | 0 | 0 | null | 2017-04-03T19:09:49 | 2017-04-03T19:09:49 | null | UTF-8 | Python | false | false | 4,323 | py | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.smodules import cleantitle
from resources.lib.smodules import client
from resources.lib.smodules import cache
from resources.lib.smodules import directstream
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['dizigold.net', 'dizigold.org']
self.base_link = 'http://www.dizigold.org'
self.player_link = 'http://player.dizigold.org/?id=%s&s=1&dil=or'
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
result = cache.get(self.dizigold_tvcache, 120)
tvshowtitle = cleantitle.get(tvshowtitle)
result = [i[0] for i in result if tvshowtitle == i[1]][0]
url = urlparse.urljoin(self.base_link, result)
url = urlparse.urlparse(url).path
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def dizigold_tvcache(self):
try:
result = client.request(self.base_link)
result = client.parseDOM(result, 'div', attrs = {'class': 'dizis'})[0]
result = re.compile('href="(.+?)">(.+?)<').findall(result)
result = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in result]
result = [(i[0], cleantitle.get(i[1])) for i in result]
return result
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
if url == None: return
url = '/%s/%01d-sezon/%01d-bolum' % (url.replace('/', ''), int(season), int(episode))
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
result = client.request(url)
result = re.compile('var\s*view_id\s*=\s*"(\d*)"').findall(result)[0]
query = self.player_link % result
result = client.request(query, headers={'Referer': url})
try:
url = client.parseDOM(result, 'iframe', ret='src')[-1]
if 'openload' in url:
host = 'openload.co' ; direct = False ; url = [{'url': url, 'quality': 'HD'}]
elif 'ok.ru' in url:
host = 'vk' ; direct = True ; url = directstream.odnoklassniki(url)
elif 'vk.com' in url:
host = 'vk' ; direct = True ; url = directstream.vk(url)
else: raise Exception()
for i in url: sources.append({'source': host, 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': direct, 'debridonly': False})
except:
pass
try:
url = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
links = [(i[0], '1080p') for i in url if int(i[1]) >= 1080]
links += [(i[0], 'HD') for i in url if 720 <= int(i[1]) < 1080]
links += [(i[0], 'SD') for i in url if 480 <= int(i[1]) < 720]
for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'language': 'en', 'url': i[0], 'direct': True, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
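# Usage sketch (hypothetical ids/titles; the scraper queries the live dizigold
# site, so this only illustrates the calling convention Exodus-style addons use):
#
#   s = source()
#   show = s.tvshow('tt0000000', '0', 'Example Show', '2016')
#   ep = s.episode(show, 'tt0000000', '0', 'Pilot', '2016-01-01', '1', '1')
#   for link in s.sources(ep, [], []):
#       print link['source'], link['quality'], link['url']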
| [
"[email protected]"
] | |
bf3b8692df156e8ab78abcd681ded23c4759b911 | 6230dd7501bb504643cb3b8d8d18889f4bc9e292 | /web_frameworks/templates_advanced/apps.py | ba80c31d12cea247b98e09edde360664c7894e0a | [
"MIT"
] | permissive | Minkov/python-web-frameworks-2020-11 | f83a8560cbbcd06549bcacaca83de3af4824adc6 | 5857bb626792a9efe1f2d06677fa3779f5e2cc1d | refs/heads/main | 2023-01-21T07:02:46.141981 | 2020-12-01T18:30:20 | 2020-12-01T18:30:20 | 310,352,954 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | from django.apps import AppConfig
class TemplatesAdvancedConfig(AppConfig):
name = 'templates_advanced'
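# To take effect, the app has to be registered in the project's settings,
# e.g. INSTALLED_APPS = [..., 'templates_advanced'] (illustrative snippet;
# the settings module lives outside this file).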
| [
"[email protected]"
] | |
ec3bb5480e949b6d5afe23cab0f0ff0f2bc524ae | 62b75c03509dcd993a28eba2bb7004ae5f427f73 | /astropy/vo/validator/setup_package.py | aceae1de0abb38b79a066bd58d1ede6767e1a321 | [] | permissive | xiaomi1122/astropy | 08aba5592d9bb54e725708352e34db89af2ec289 | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | refs/heads/master | 2020-04-09T12:27:36.768462 | 2018-12-06T01:11:23 | 2018-12-06T01:11:23 | 160,299,140 | 0 | 0 | BSD-3-Clause | 2018-12-04T04:52:22 | 2018-12-04T04:52:22 | null | UTF-8 | Python | false | false | 273 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
return {
'astropy.vo.validator': ['data/*.txt'],
'astropy.vo.validator.tests': ['data/*.json', 'data/*.xml',
'data/*.out']}
| [
"[email protected]"
] | |
2754b656690e5da0db18e71746cd1661d2f373c1 | 9790af3da573b4fd28910ec23da9d877dbddc5e5 | /.history/trang_thai_dau_20210906145049.py | af50a67a1a7d0f670f5deafea2e69773d60efe7e | [] | no_license | KhanhNguyen1308/THIET_BI_CANH_BAO-BUON_NGU | 3e9f16252d21ff0406e7dba44033320542e80b10 | 88f1e8d037125e403b02ffe4ec0e838447aec7bb | refs/heads/main | 2023-07-16T04:48:36.453245 | 2021-09-06T13:29:34 | 2021-09-06T13:29:34 | 403,448,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,867 | py | def trang_thai_dau(thuoc, mui_ten, gd, R, goc_chinh, goc_nghieng):
if thuoc:
if (gd[1]+R/2)<=mui_ten[1]:
trang_thai = 'Cui'
mode = 1
elif mui_ten[1] <= (gd[1]-R/2):
trang_thai = 'Ngang'
mode = 8
else:
if -20 <= goc_nghieng <= 20:
trang_thai = 'Thang'
mode = 0
elif goc_nghieng < -20:
trang_thai = 'Nghieng phai'
mode = 2
else:
trang_thai = 'Nghieng trai'
mode = 3
else:
if mui_ten[0] <= gd[0] and mui_ten[1] <= gd[1]:
if goc_chinh <= 45:
trang_thai = 'Nhin phai'
mode = 6
else:
trang_thai = 'Ngang'
mode = 8
elif mui_ten[0] > gd[0] and mui_ten[1] <= gd[1]:
if goc_chinh <= 45:
trang_thai = 'Nhin trai'
mode = 7
else:
trang_thai = 'Ngang'
mode = 8
elif mui_ten[0] <= gd[0] and mui_ten[1] > gd[1]:
if -45 <= goc_chinh <= -20:
trang_thai = 'Cui phai'
mode = 4
elif goc_chinh > -20:
trang_thai = 'Nhin phai'
mode = 6
else:
trang_thai = 'Cui'
mode = 1
elif mui_ten[0] > gd[0] and mui_ten[1] > gd[1]:
if 20 <= goc_chinh <= 45:
trang_thai = 'Cui trai'
mode = 5
elif goc_chinh < 20:
trang_thai = 'Nhin trai'
mode = 7
else:
trang_thai = 'Cui'
mode = 1
trang_thai_trc = trang_thai
return trang_thai, mode, trang_thai_trc
def trang_thai_mat(ty_le_mat, ty_le_mat_phai, ty_le_mat_trai, dem, mode, canh_bao, trang_thai_trc):
if mode == 0:
if ty_le_mat <= 0.25:
trang_thai = 'Nham'
dem += 1
if dem >= 20:
canh_bao = True
else:
trang_thai = 'Mo'
dem = 0
canh_bao = False
elif mode == 1:
if ty_le_mat <= 0.3:
trang_thai = 'Nham'
dem += 1
if dem >= 20:
canh_bao = True
else:
trang_thai = 'Mo'
dem = 0
canh_bao = False
elif mode == 2:
if ty_le_mat <= 0.28:
trang_thai = 'Nham'
dem += 1
if dem >= 20:
                canh_bao = True
else:
trang_thai = 'Mo'
dem = 0
canh_bao = False
elif mode == 3:
if ty_le_mat <= 0.28:
trang_thai = 'Nham'
dem += 1
if dem >= 20:
canh_bao = True
else:
trang_thai = 'Mo'
dem = 0
canh_bao = False
elif mode == 4:
if ty_le_mat_trai <= 0.28:
trang_thai = 'Nham'
dem += 1
if dem >= 20:
canh_bao = True
else:
trang_thai = 'Mo'
dem = 0
canh_bao = False
elif mode == 5:
if ty_le_mat_phai <= 0.28:
trang_thai = 'Nham'
dem += 1
if dem >= 20:
canh_bao = True
else:
trang_thai = 'Mo'
dem = 0
canh_bao = False
elif mode == 6:
if ty_le_mat_trai <= 0.28:
trang_thai = 'Nham'
dem += 1
if dem >= 20:
canh_bao = True
else:
trang_thai = 'Mo'
dem = 0
canh_bao = False
elif mode == 7:
if ty_le_mat_phai <= 0.28:
trang_thai = 'Nham'
dem += 1
if dem >= 20:
canh_bao = True
else:
trang_thai = 'Mo'
dem = 0
canh_bao = False
elif mode == 8:
if ty_le_mat <= 0.20:
trang_thai = 'Nham'
dem += 1
if dem >= 20:
canh_bao = True
else:
trang_thai = 'Mo'
dem = 0
canh_bao = False
trang_thai_trc = trang_thai
return trang_thai, trang_thai_trc, dem, canh_bao
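# Pipeline sketch (illustrative numbers; in the real detector every value comes
# from facial-landmark geometry computed per video frame):
#
#   trang_thai, mode, truoc = trang_thai_dau(True, (120, 95), (120, 80), 40, 50, 5)
#   mat, truoc, dem, canh_bao = trang_thai_mat(0.22, 0.22, 0.22, dem, mode,
#                                              canh_bao, truoc)
#   gat, dem, truoc, canh_bao = gat_dau(truoc, mode, dem, gat, mat, canh_bao)
#
# gat_dau() below then flags a quick lower-and-raise of the head as a nod.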
def gat_dau(trang_thai_trc, mode, dem, gat_num, trang_thai, canh_bao):
if (trang_thai_trc == 0 or trang_thai_trc == 1) and mode == 0 and trang_thai == "Mo":
        trang_thai_trc = mode
gat_num = 0
canh_bao = False
if mode == 1 and trang_thai_trc == 0:
if trang_thai == "Nham":
dem += 1
trang_thai_trc = mode
if mode == 0 and trang_thai_trc == 1:
if dem <= 10 and dem != 0:
gat_num = 1
dem = 0
canh_bao = True
return gat_num, dem, trang_thai_trc, canh_bao | [
"[email protected]"
] | |
d4d4a6a0e44650a41c9924497d399ebb3be68541 | 913c018e5300ee601cacf0cb0efbe9341e00bbe9 | /slides/slide-22.py | f804b444a6c5e1075fc8cd7a652bd8949251e1d5 | [
"Unlicense"
] | permissive | a-abir/visionProcessingCV | 8eeacad7598b25d50bf9af121c6a45b13c3079e9 | 0ed73c8205684abbe80c660e3d59b2d8d9f310f7 | refs/heads/master | 2020-04-13T03:06:26.503007 | 2019-11-09T16:12:58 | 2019-11-09T16:12:58 | 162,921,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,311 | py | import cv2
import numpy as np
# Create an instance of camera 0
cap = cv2.VideoCapture(0)
win = 'Result'
nothing = lambda *args, **kwargs: None
# create window with name win
cv2.namedWindow(win, cv2.WINDOW_AUTOSIZE)
# create trackbars
cv2.createTrackbar('Hue Low', win, 27, 179, nothing)
cv2.createTrackbar('Hue High', win, 40, 179, nothing)
cv2.createTrackbar('Saturation Low', win, 100, 255, nothing)
cv2.createTrackbar('Saturation High', win, 255, 255, nothing)
cv2.createTrackbar('Value Low', win, 100, 255, nothing)
cv2.createTrackbar('Value High', win, 255, 255, nothing)
cv2.createTrackbar('Blur', win, 30, 100, nothing)
x, y, radius = 0, 0, 0
while True:
# Get the image from camera 0
_, image = cap.read()
image = cv2.resize(image, (int(image.shape[1]//2),
int(image.shape[0]//2)))
# show image under window
cv2.imshow("Raw Camera Data", image)
result = cv2.cvtColor(
image,
cv2.COLOR_BGR2HSV
)
# get values from trackbars
HueLow = cv2.getTrackbarPos('Hue Low', win)
HueHigh = cv2.getTrackbarPos('Hue High', win)
SatLow = cv2.getTrackbarPos('Saturation Low', win)
SatHigh = cv2.getTrackbarPos('Saturation High', win)
ValLow = cv2.getTrackbarPos('Value Low', win)
ValHigh = cv2.getTrackbarPos('Value High', win)
Blur = cv2.getTrackbarPos('Blur', win)
Blur = Blur if Blur % 2 == 1 else Blur + 1
# Literal values
HSV_LOW = np.array([HueLow, SatLow, ValLow])
HSV_HIGH = np.array([HueHigh, SatHigh, ValHigh])
# Filter values with mask
mask = cv2.inRange(result, HSV_LOW, HSV_HIGH)
result = cv2.bitwise_and(
result,
result,
mask=mask
)
# Convert result to BGR then to GRAY
result = cv2.cvtColor(
result,
cv2.COLOR_HSV2BGR
)
# create morph kernel
morphkernel = np.ones((1, 1), np.uint8)
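    # note: a 1x1 kernel makes the open/close below identity operations; a
    # larger kernel, e.g. np.ones((5, 5), np.uint8), is needed for visible cleanup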
# removes specs
result = cv2.morphologyEx(
result, cv2.MORPH_OPEN, morphkernel
)
# removes holes
result = cv2.morphologyEx(
result, cv2.MORPH_CLOSE, morphkernel
)
result = cv2.GaussianBlur(
result, (Blur, Blur), 0
)
# find irregular shapes using mask
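    # note: taking index [1] matches OpenCV 3.x, where findContours returns
    # (image, contours, hierarchy); OpenCV 4.x returns (contours, hierarchy)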
contours = cv2.findContours(
mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)[1]
# if there is one or more contours
if len(contours) > 0:
# get shape with max area
contour = max(contours, key=cv2.contourArea)
# if that area is large enough
if cv2.contourArea(contour) > 100:
# get the centroid of object
(x, y), radius = cv2.minEnclosingCircle(contour)
center = (int(x), int(y))
radius = int(radius)
# Draw Contour
cv2.drawContours(
result,
[contour],
-10, (0, 0, 255), 4
)
# Draw centroid
cv2.circle(result, center, 10, (255, 0, 0), 20)
cv2.circle(result, center, radius, (0, 255, 0), 4)
if radius > 100:
print("({}, {}), radius: {}".format(int(x), int(y), int(radius)))
# result image under window
cv2.imshow(win, result)
# press 'q' key to break
if cv2.waitKey(1) & 0xFF == ord("q"):
break
# stop
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
0e56df02e997c91c44b884d463bf5eeb9023aca3 | 8a3f18e44cde5a3495e87e50ec629d0dbc36e84f | /week_02/Day_02/i wont cheat on exam.py | 4ce27cc53c65467f26bd732159af4a3aa3f9f9dc | [] | no_license | green-fox-academy/rohanmeshram5 | 649143959bbb220c9716d9e28d46aabb63199989 | eb18a4b2f5e84e7475f0700b0092515f002ca622 | refs/heads/master | 2023-03-12T00:29:00.808599 | 2021-03-03T18:31:59 | 2021-03-03T18:31:59 | 342,660,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | x = "I won't cheat on the exam \n "
print(x*100)
| [
"[email protected]"
] | |
ee993c51f2f4967c8388c89fad243c02b404fb0d | b73104aaee20ca8176bb5b3a85bdad191316793b | /terra_geocrud/migrations/0060_auto_20210122_1132.py | c630746e3bba1a93f2be87c42be33945c6a5d21c | [] | no_license | Terralego/django-terra-geocrud | d28b298d91fef9e0695240beac8dd04e825ea947 | da1e4ffb40220e4a8e76e1ceb75892224869fe53 | refs/heads/master | 2022-07-07T07:15:03.420853 | 2022-06-30T15:56:15 | 2022-06-30T15:56:15 | 198,394,781 | 4 | 1 | null | 2022-06-30T15:49:17 | 2019-07-23T09:10:50 | Python | UTF-8 | Python | false | false | 1,995 | py | # Generated by Django 3.1.5 on 2021-01-22 11:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('geostore', '0044_auto_20201106_1638'),
('terra_geocrud', '0059_auto_20201022_0930'),
]
operations = [
migrations.CreateModel(
name='RoutingSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(help_text='Label that will be shown on the list', max_length=250)),
('provider', models.CharField(choices=[('mapbox', 'Mapbox'), ('geostore', 'Geostore')], help_text="Provider's name", max_length=250)),
('mapbox_transit', models.CharField(blank=True, choices=[('driving', 'Driving'), ('walking', 'Walking'), ('cycling', 'Cycling')], help_text='Mabox transit', max_length=250)),
('crud_view', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='routing_settings', to='terra_geocrud.crudview')),
('layer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='routing_settings', to='geostore.layer')),
],
),
migrations.AddConstraint(
model_name='routingsettings',
constraint=models.UniqueConstraint(condition=models.Q(layer__isnull=False), fields=('provider', 'layer'), name='check_provider_layer'),
),
migrations.AddConstraint(
model_name='routingsettings',
constraint=models.UniqueConstraint(condition=models.Q(_negated=True, mapbox_transit=''), fields=('provider', 'mapbox_transit'), name='check_provider_mapbox_transit'),
),
migrations.AlterUniqueTogether(
name='routingsettings',
unique_together={('label', 'crud_view'), ('layer', 'crud_view')},
),
]
| [
"[email protected]"
] | |
0c08f0bf76cb7425cd21e1b839311f1a6f18fac6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02715/s648085853.py | 1690b0179ea73214c86aabcff11e11b6d66648e5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | MOD = 10**9 + 7
def mod_pow(p, q):
res = 1
while q > 0:
if q & 1:
res = (res * p) % MOD
q //= 2
p = (p * p) % MOD
return res
def solve(n, k):
dp = [0] * (k+1)
ans = 0
for x in range(k, 0, -1):
dp[x] = mod_pow((k // x), n) - sum(dp[y] for y in range(2*x, k+1, x))
dp[x] %= MOD
ans += dp[x] * x
return ans % MOD
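# Hand-worked check (added for illustration): dp[x] counts the length-n tuples
# over 1..k whose gcd is exactly x, so solve(2, 2) == 5: the pairs (1,1),
# (1,2), (2,1), (2,2) have gcds 1, 1, 1, 2, which sum to 5.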
n, k = map(int, input().split())
print(solve(n, k)) | [
"[email protected]"
] | |
cc1acebbeb60903b4eecac542d49dafb365417e2 | 5e6b385cf34605e9fd606cf8ca70171120c9a789 | /urls.py | 8af044f6507623d51f011b4dbd431366bf082d09 | [] | no_license | jeffedlund/astrokit | 6a8a361ba508a630fb0d0e29bbd389a9fe931650 | bae69b88f6bf2b6cd4a307a1fd7342613eb1981e | refs/heads/master | 2020-07-10T11:18:59.539234 | 2016-06-26T22:51:44 | 2016-06-26T22:51:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | """astrokit URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^s3upload/', include('s3direct.urls')),
url(r'^', include('imageflow.urls')),
]
| [
"[email protected]"
] | |
f2fd460bcc1d9881b286527a314b6aa57eb2f47c | f023692f73992354a0b7823d9c49ae730c95ab52 | /AtCoderBeginnerContest/2XX/250/A.py | 16a0a7a5857fb59f580771bb332e93d03882271f | [] | no_license | corutopi/AtCorder_python | a959e733f9a3549fab7162023e414ac2c99c4abe | a2c78cc647076071549e354c398155a65d5e331a | refs/heads/master | 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 | Python | UTF-8 | Python | false | false | 743 | py | # import sys
# sys.setrecursionlimit(10 ** 6)
# # for pypy
# import pypyjit
# pypyjit.set_param('max_unroll_recursion=-1')
# import bisect
# from collections import deque
# import string
from math import ceil, floor
inf = float('inf')
mod = 10 ** 9 + 7
mod2 = 998244353
# from decorator import stop_watch
#
#
# @stop_watch
def solve(H,W,R,C):
print((0 if H == 1 else 2 if 1 < R < H else 1) +
(0 if W == 1 else 2 if 1 < C < W else 1))
if __name__ == '__main__':
H, W = map(int, input().split())
R, C = map(int, input().split())
solve(H,W,R,C)
# # test
# from random import randint
# import string
# import tool.testcase as tt
# from tool.testcase import random_str, random_ints
# solve()
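# Hand-worked check (illustrative): for H=2, W=3, R=1, C=2 the cell sits on the
# top edge (one vertical neighbour) while 1 < C < W gives two horizontal
# neighbours, so solve(2, 3, 1, 2) prints 3.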
| [
"[email protected]"
] | |
170d4bc8e9ff5699921eb404d75bb4f01be51dd0 | be651253d2aabbf2f5481bbb9f806d85de3c87b8 | /3.0 Attributes and Methods - Lab/03. Calculator.py | 9fd25a160da08ad67f086b7201b1cf06a74e0177 | [] | no_license | byAbaddon/OOP-Course-PYTHON-May-2020 | 49eed98885e610a9aa4f74cc13fee43c78a64ce3 | ee8bdeef9ff15733d11127af45397b723d123a7a | refs/heads/main | 2023-06-24T01:40:10.272556 | 2021-07-25T00:42:16 | 2021-07-25T00:42:16 | 381,504,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | from functools import reduce
class Calculator:
@staticmethod
def add(*args):
return sum(list(args))
@staticmethod
def multiply(*args):
return reduce(lambda a, x: a * x, args)
@staticmethod
def divide(*args):
return reduce(lambda a, x: a / x, args)
@staticmethod
def subtract(*args):
return reduce(lambda a, x: a - x, args)
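
# Quick demonstration (illustrative values, not part of the original lab task):
if __name__ == '__main__':
    print(Calculator.add(5, 10, 4))          # 19
    print(Calculator.subtract(90, 20, -50))  # 120
    print(Calculator.multiply(2, 3, 4))      # 24
    print(Calculator.divide(100, 2, 5))      # 10.0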
| [
"[email protected]"
] | |
3017088eeb0b53c19b693decc4dee0679ff5350b | 5ff73a257eed74de87c0279c69552c19420fcc7d | /venv/bin/restauth-user.py | 51addc518270df04d9c412d7b8681ce194d830b0 | [] | no_license | GanapathiAmbore/api_auth_pro | 9109f4fbd50ae0225875daa3f82418b7c9aa5381 | d98e3cf1cade4c9b461fe298f94bdc38625c06aa | refs/heads/master | 2022-06-13T08:31:49.728775 | 2019-07-16T05:16:37 | 2019-07-16T05:16:37 | 196,578,277 | 0 | 0 | null | 2022-04-22T21:55:26 | 2019-07-12T12:47:44 | Python | UTF-8 | Python | false | false | 4,492 | py | #!/home/ganapathi/PycharmProjects/authpro/venv/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of RestAuth (https://restauth.net).
#
# RestAuth is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# RestAuth is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with RestAuth. If not,
# see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import os
import sys
import getpass
from pkg_resources import DistributionNotFound
from pkg_resources import Requirement
from pkg_resources import resource_filename
# Setup environment
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'RestAuth.settings')
sys.path.append(os.getcwd())
try:
req = Requirement.parse("RestAuth")
path = resource_filename(req, 'RestAuth')
if os.path.exists(path): # pragma: no cover
sys.path.insert(0, path)
except DistributionNotFound:
pass # we're run in a not-installed environment
try:
from django.utils import six
from Services.models import Service
from Users.cli.parsers import parser
from backends import user_backend
from backends import property_backend
from backends import group_backend
from common.errors import UserExists
except ImportError as e: # pragma: no cover
sys.stderr.write(
'Error: Cannot import RestAuth. Please make sure RestAuth is in your PYTHONPATH.\n')
sys.exit(1)
def main(args=None):
# parse arguments
args = parser.parse_args(args=args)
if args.action == 'add':
password = args.get_password(args)
if args.password_generated:
print(args.pwd)
user_backend.set_password(args.user.username, password)
elif args.action in ['ls', 'list']:
for username in sorted(user_backend.list()):
if six.PY3: # pragma: py3
print(username)
else: # pragma: py2
print(username.encode('utf-8'))
elif args.action == 'verify':
if not args.pwd: # pragma: no cover
args.pwd = getpass.getpass('password: ')
if user_backend.check_password(args.user.username, args.pwd):
print('Ok.')
else:
print('Failed.')
sys.exit(1)
elif args.action == 'set-password':
password = args.get_password(args)
if args.password_generated:
print(args.pwd)
user_backend.set_password(args.user.username, args.pwd)
elif args.action == 'view':
props = property_backend.list(args.user)
if 'date joined' in props:
print('Joined: %s' % props['date joined'])
if 'last login' in props:
print('Last login: %s' % props['last login'])
if args.service:
groups = group_backend.list(service=args.service, user=args.user)
if groups:
print('Groups: %s' % ', '.join(sorted(groups)))
else:
print('No groups.')
else:
groups = {}
none_groups = group_backend.list(service=None, user=args.user)
for service in Service.objects.all():
subgroups = group_backend.list(service=service, user=args.user)
if subgroups:
groups[service.username] = subgroups
if groups or none_groups:
print('Groups:')
if none_groups:
print('* no service: %s' % ', '.join(sorted(none_groups)))
for service, groups in sorted(groups.items(), key=lambda t: t[0]):
print('* %s: %s' % (service, ', '.join(sorted(groups))))
else:
print('No groups.')
elif args.action == 'rename':
try:
user_backend.rename(args.user.username, args.name)
except UserExists as e:
parser.error("%s: %s" % (args.name if six.PY3 else args.name.decode('utf-8'), e))
elif args.action in ['delete', 'rm', 'remove']: # pragma: no branch
user_backend.remove(args.user.username)
if __name__ == '__main__': # pragma: no cover
main()
| [
"[email protected]"
] | |
11c17b259d3788d26583fa112d9f1480f7a7ddc9 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/keyword_plan_campaign_service/transports/base.py | f3002492db3249e0f4e6c4c79ceb128094274fca | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,311 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v6.resources.types import keyword_plan_campaign
from google.ads.googleads.v6.services.types import keyword_plan_campaign_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads-googleads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class KeywordPlanCampaignServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for KeywordPlanCampaignService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_keyword_plan_campaign: gapic_v1.method.wrap_method(
self.get_keyword_plan_campaign,
default_timeout=None,
client_info=client_info,
),
self.mutate_keyword_plan_campaigns: gapic_v1.method.wrap_method(
self.mutate_keyword_plan_campaigns,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_keyword_plan_campaign(self) -> typing.Callable[
[keyword_plan_campaign_service.GetKeywordPlanCampaignRequest],
keyword_plan_campaign.KeywordPlanCampaign]:
raise NotImplementedError
@property
def mutate_keyword_plan_campaigns(self) -> typing.Callable[
[keyword_plan_campaign_service.MutateKeywordPlanCampaignsRequest],
keyword_plan_campaign_service.MutateKeywordPlanCampaignsResponse]:
raise NotImplementedError
__all__ = (
'KeywordPlanCampaignServiceTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
13aae462a5aafa015fb82c45d2a91d4661c6d84d | 3312b5066954cbf96c79ef3e1f3d582b31ebc5ae | /colegend/academy/admin.py | 33500be58723d3916ead15c076b6f986af5748b9 | [] | no_license | Eraldo/colegend | d3f3c2c37f3bade7a3a1e10d307d49db225fe7f5 | 2e7b9d27887d7663b8d0d1930c2397c98e9fa1fc | refs/heads/master | 2021-01-16T23:32:09.245967 | 2020-10-07T12:12:14 | 2020-10-07T12:12:14 | 21,119,074 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import Book, BookReview, BookTag
class TaggedBookInline(admin.TabularInline):
verbose_name = _('tagged book')
verbose_name_plural = _('tagged books')
model = Book.tags.through
extra = 0
show_change_link = True
class BookReviewInline(admin.TabularInline):
fields = ['owner', 'rating', 'area_1', 'area_2', 'area_3', 'area_4', 'area_5', 'area_6', 'area_7']
model = BookReview
extra = 0
show_change_link = True
@admin.register(BookTag)
class BookTagAdmin(admin.ModelAdmin):
inlines = [TaggedBookInline]
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
list_display = ['name', 'author', 'rating', 'public', 'featured']
list_filter = ['public', 'featured', 'tags']
list_editable = ['public']
filter_horizontal = ['tags']
readonly_fields = ['created']
search_fields = ['name', 'author']
inlines = [BookReviewInline]
@admin.register(BookReview)
class BookReviewAdmin(admin.ModelAdmin):
list_display = ['book', 'owner']
list_filter = ['owner', 'book']
readonly_fields = ['created']
| [
"[email protected]"
] | |
61eba779f71680917adcbb59e6fbd106e63422fa | 481b7922a39c514e12087b8fde1e6595315ecaa3 | /notifications/models.py | b9f022a3adbb347eea69bd15d618db4e150a04e3 | [] | no_license | Zolo-2000/mape-1 | 3ad994b285229924f23a3a895a925b7551ba3ebd | 87651ff08590ff5dd2685fa08956632493ee97c9 | refs/heads/master | 2021-01-19T22:21:46.884952 | 2017-08-31T16:23:57 | 2017-08-31T16:23:57 | 88,803,138 | 0 | 0 | null | 2017-04-20T00:27:07 | 2017-04-20T00:27:07 | null | UTF-8 | Python | false | false | 517 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from social.models import Profile
class Notification(models.Model):
TYPE_CHOICES = (
(1, 'Like'),
(2, 'Amistad'),
(3, 'Invitacion'),
)
from_profile = models.ForeignKey(Profile)
to_profile = models.ForeignKey(Profile, related_name='notifications')
message = models.TextField(blank=True, null=True)
type = models.PositiveSmallIntegerField(choices=TYPE_CHOICES)
date = models.DateTimeField(auto_now_add=True)
| [
"[email protected]"
] | |
f211453dbbccbeb4172dfdc05ad63a35038e47f2 | ddd7e91dae17664505ea4f9be675e125337347a2 | /unused/2014/distributed/find_optimal_transformation_distributor_script.py | 47787e7d5e351e7c76dd664da9c1f36153dabe6b | [] | no_license | akurnikova/MouseBrainAtlas | 25c4134bae53827167e4b54ba83f215aec9f2d85 | ed1b5858467febdaed0a58a1a742764d214cc38e | refs/heads/master | 2021-07-15T17:17:19.881627 | 2019-02-22T06:00:17 | 2019-02-22T06:00:17 | 103,425,463 | 0 | 0 | null | 2018-04-27T19:08:02 | 2017-09-13T16:45:56 | Jupyter Notebook | UTF-8 | Python | false | false | 299 | py | import os
import time
from preprocess_utility import *
t = time.time()
script_root = os.environ['GORDON_REPO_DIR']+'/notebooks/'
arg_tuples = [[i] for i in range(8)]
run_distributed3(script_root+'/find_optimal_transformation_executable.py', arg_tuples)
print 'total', time.time() - t, 'seconds'
| [
"[email protected]"
] | |
2a8ef7d09dd39f175ff581175473aaecc29aa67b | c8a38e65e71de888fc5b22fbd027bbaa0f3f6ef1 | /classic/MergeSort.py | e90a38bdccd7b2e70d6a29f8bfe43220a25d24fa | [] | no_license | skywhat/leetcode | e451a10cdab0026d884b8ed2b03e305b92a3ff0f | 6aaf58b1e1170a994affd6330d90b89aaaf582d9 | refs/heads/master | 2023-03-30T15:54:27.062372 | 2023-03-30T06:51:20 | 2023-03-30T06:51:20 | 90,644,891 | 82 | 27 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | def mergeSort(s, l, r):
if l>=r:
return
mid = (l+r)/2
mergeSort(s, l, mid)
mergeSort(s, mid+1, r)
merge(s, l, mid, r)
def merge(s, l, mid, r):
t = []
i = l
j = mid+1
while i<=mid and j<=r:
if s[i] < s[j]:
t.append(s[i])
i+=1
else:
t.append(s[j])
j+=1
while i<=mid:
t.append(s[i])
i+=1
while j<=r:
t.append(s[j])
j+=1
for i, x in enumerate(t):
s[l+i] = x
if __name__=="__main__":
s = [3,5,1,2,7,8,10,0]
mergeSort(s, 0, len(s)-1)
print s
s = [9,8,7,6,5,4,3,2,1]
mergeSort(s, 0, len(s)-1)
print s
| [
"[email protected]"
] | |
9b47587d53e94ef38b76436bc239d8c7de43f70f | 20d8a89124008c96fa59225926ce39f113522daa | /UL_NanoAODv8/2018/step7_cfg.py | 796ed2f73ea4ff4cc4240bf4fcdd4341af58cff3 | [] | no_license | MiT-HEP/MCProduction | 113a132a2ff440e13225be518ff8d52b0136e1eb | df019d7a15717a9eafd9502f2a310023dcd584f5 | refs/heads/master | 2022-05-06T20:25:34.372363 | 2022-04-12T11:55:15 | 2022-04-12T11:55:15 | 37,586,559 | 5 | 7 | null | 2015-08-24T11:13:58 | 2015-06-17T09:45:12 | Python | UTF-8 | Python | false | false | 3,932 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: --python_filename step7_cfg.py --eventcontent NANOAODSIM --customise Configuration/DataProcessing/Utils.addMonitoring --datatier NANOAODSIM --fileout file:step7.root --conditions 106X_upgrade2018_realistic_v15_L1v1 --step NANO --filein file:step6.root --era Run2_2018,run2_nanoAOD_106Xv1 --no_exec --mc -n 500
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_2018_cff import Run2_2018
from Configuration.Eras.Modifier_run2_nanoAOD_106Xv1_cff import run2_nanoAOD_106Xv1
process = cms.Process('NANO',Run2_2018,run2_nanoAOD_106Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(500)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:step6.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('--python_filename nevts:500'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODSIMoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAODSIM'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:step7.root'),
outputCommands = process.NANOAODSIMEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '106X_upgrade2018_realistic_v15_L1v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequenceMC)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODSIMoutput_step = cms.EndPath(process.NANOAODSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODSIMoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeMC
#call to customisation function nanoAOD_customizeMC imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeMC(process)
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# End of customisation functions
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| [
"[email protected]"
] | |
ed568aebf7ccfb7ab927a70bb4f56f29f3aadebe | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2908/61519/270339.py | 01ee9a6039d0bea096c53e730e0059e37ea1e6bc | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | n=int(input())
a=[]
b=[]
for i in range(n):
s=input()
a.append(s)
for i in range(len(a)):
tem=list(a[i])
tem.sort()
a[i]="".join(tem)
b.append(a[0])
for i in range(len(a)):
if a[i] not in b:
b.append(a[i])
print(len(b)) | [
"[email protected]"
] | |
e18d08fc75cb6a5c7e65786034508a5a6dfae2fb | ad4c2aa0398406ccb7e70562560e75fa283ffa1a | /prison-cells-after-n-days/prison-cells-after-n-days.py | 5f5774ff8645fab995636b2a9afdbffacd5365cd | [
"Apache-2.0"
] | permissive | kmgowda/kmg-leetcode-python | 427d58f1750735618dfd51936d33240df5ba9ace | 4d32e110ac33563a8bde3fd3200d5804db354d95 | refs/heads/main | 2023-08-22T06:59:43.141131 | 2021-10-16T14:04:32 | 2021-10-16T14:04:32 | 417,841,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | # https://leetcode.com/problems/prison-cells-after-n-days
class Solution(object):
def prisonAfterNDays(self, cells, N):
"""
:type cells: List[int]
:type N: int
:rtype: List[int]
"""
cells = [-1]+ cells+[-1]
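        # the cell pattern repeats with a period that divides 14, so reduce N
        # modulo 14 while keeping at least one simulated day (this maps N to
        # ((N - 1) % 14) + 1 for every N >= 1)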
N -= max(N - 1, 0) / 14 * 14
for _ in range(N):
newcells = cells[:]
for i in range(1, len(cells)-1):
if (cells[i-1] == 0 and cells[i+1] == 0) or (cells[i-1] == 1 and cells[i+1] == 1):
newcells[i] = 1
else:
newcells[i] = 0
cells = newcells
return cells[1:-1] | [
"[email protected]"
] | |
b5cd2ddf1a640e787b53681fe898447ac3fc4544 | 78b8f4f4205350abe9cf8a8b26ba756f65e61a42 | /samples/openapi3/client/petstore/python-nextgen-aiohttp/petstore_api/configuration.py | 12e9a7ef799f6486f58dbd8aeacc0b242fd13071 | [
"Apache-2.0"
] | permissive | borsch/openapi-generator | 9c3791e1ce5356bc64a13a87938a0d030f379c5f | 6d48d7342112f3a4d1c543054575a21424ec8f45 | refs/heads/master | 2023-06-10T22:26:43.941807 | 2023-03-01T07:30:18 | 2023-03-01T07:30:18 | 195,118,780 | 0 | 2 | Apache-2.0 | 2023-05-30T03:57:34 | 2019-07-03T19:57:40 | Java | UTF-8 | Python | false | false | 20,905 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import sys
import urllib3
import http.client as httplib
from petstore_api.exceptions import ApiValueError
JSON_SCHEMA_VALIDATION_KEYWORDS = {
'multipleOf', 'maximum', 'exclusiveMaximum',
'minimum', 'exclusiveMinimum', 'maxLength',
'minLength', 'pattern', 'maxItems', 'minItems'
}
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url.
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer).
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication.
:param password: Password for HTTP basic authentication.
:param access_token: Access token.
:param signing_info: Configuration parameters for the HTTP signature security scheme.
Must be an instance of petstore_api.signing.HttpSigningConfiguration
:param server_index: Index to servers configuration.
:param server_variables: Mapping with string values to replace variables in
templated server configuration. The validation of enums is performed for
variables with defined enum values before.
:param server_operation_index: Mapping from operation ID to an index to server
configuration.
:param server_operation_variables: Mapping from operation ID to a mapping with
string values to replace variables in templated server configuration.
The validation of enums is performed for variables with defined enum values before.
:param ssl_ca_cert: str - the path to a file of concatenated CA certificates
in PEM format.
:Example:
API Key Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
cookieAuth: # name for the security scheme
type: apiKey
in: cookie
name: JSESSIONID # cookie name
You can programmatically set the cookie:
conf = petstore_api.Configuration(
api_key={'cookieAuth': 'abc123'}
api_key_prefix={'cookieAuth': 'JSESSIONID'}
)
The following cookie will be added to the HTTP request:
Cookie: JSESSIONID abc123
HTTP Basic Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
http_basic_auth:
type: http
scheme: basic
Configure API client with HTTP basic authentication:
conf = petstore_api.Configuration(
username='the-user',
password='the-password',
)
HTTP Signature Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
http_basic_auth:
type: http
scheme: signature
Configure API client with HTTP signature authentication. Use the 'hs2019' signature scheme,
sign the HTTP requests with the RSA-SSA-PSS signature algorithm, and set the expiration time
of the signature to 5 minutes after the signature has been created.
Note you can use the constants defined in the petstore_api.signing module, and you can
also specify arbitrary HTTP headers to be included in the HTTP signature, except for the
'Authorization' header, which is used to carry the signature.
One may be tempted to sign all headers by default, but in practice it rarely works.
This is because explicit proxies, transparent proxies, TLS termination endpoints or
load balancers may add/modify/remove headers. Include the HTTP headers that you know
are not going to be modified in transit.
conf = petstore_api.Configuration(
signing_info = petstore_api.signing.HttpSigningConfiguration(
key_id = 'my-key-id',
private_key_path = 'rsa.pem',
signing_scheme = petstore_api.signing.SCHEME_HS2019,
signing_algorithm = petstore_api.signing.ALGORITHM_RSASSA_PSS,
signed_headers = [petstore_api.signing.HEADER_REQUEST_TARGET,
petstore_api.signing.HEADER_CREATED,
petstore_api.signing.HEADER_EXPIRES,
petstore_api.signing.HEADER_HOST,
petstore_api.signing.HEADER_DATE,
petstore_api.signing.HEADER_DIGEST,
'Content-Type',
'User-Agent'
],
signature_max_validity = datetime.timedelta(minutes=5)
)
)
"""
_default = None
def __init__(self, host=None,
api_key=None, api_key_prefix=None,
username=None, password=None,
access_token=None,
signing_info=None,
server_index=None, server_variables=None,
server_operation_index=None, server_operation_variables=None,
ssl_ca_cert=None,
):
"""Constructor
"""
self._base_path = "http://petstore.swagger.io:80/v2" if host is None else host
"""Default Base url
"""
self.server_index = 0 if server_index is None and host is None else server_index
self.server_operation_index = server_operation_index or {}
"""Default server index
"""
self.server_variables = server_variables or {}
self.server_operation_variables = server_operation_variables or {}
"""Default server variables
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.access_token = access_token
"""Access token
"""
if signing_info is not None:
signing_info.host = host
self.signing_info = signing_info
"""The HTTP signing configuration
"""
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("petstore_api")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to false to skip verifying SSL certificate when calling API
from https server.
"""
self.ssl_ca_cert = ssl_ca_cert
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = 100
"""This value is passed to the aiohttp to limit simultaneous connections.
Default values is 100, None means no-limit.
"""
self.proxy = None
"""Proxy URL
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ''
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Enable client side validation
self.client_side_validation = True
self.socket_options = None
"""Options to pass down to the underlying urllib3 socket
"""
self.datetime_format = "%Y-%m-%dT%H:%M:%S.%f%z"
"""datetime format
"""
self.date_format = "%Y-%m-%d"
"""date format
"""
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('logger', 'logger_file_handler'):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
if name == "signing_info" and value is not None:
# Ensure the host parameter from signing info is the same as
# Configuration.host.
value.host = self.host
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = default
@classmethod
def get_default_copy(cls):
"""Deprecated. Please use `get_default` instead.
:return: The configuration object.
"""
return cls.get_default()
@classmethod
def get_default(cls):
"""Return the default configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration.
:return: The configuration object.
"""
if cls._default is None:
cls._default = Configuration()
return cls._default
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in self.logger.items():
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in self.logger.items():
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in self.logger.items():
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier, alias=None):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:param alias: The alternative identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(
basic_auth=username + ':' + password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
if self.access_token is not None:
auth['petstore_auth'] = {
'type': 'oauth2',
'in': 'header',
'key': 'Authorization',
'value': 'Bearer ' + self.access_token
}
if 'api_key' in self.api_key:
auth['api_key'] = {
'type': 'api_key',
'in': 'header',
'key': 'api_key',
'value': self.get_api_key_with_prefix(
'api_key',
),
}
if 'api_key_query' in self.api_key:
auth['api_key_query'] = {
'type': 'api_key',
'in': 'query',
'key': 'api_key_query',
'value': self.get_api_key_with_prefix(
'api_key_query',
),
}
if self.username is not None and self.password is not None:
auth['http_basic_test'] = {
'type': 'basic',
'in': 'header',
'key': 'Authorization',
'value': self.get_basic_auth_token()
}
if self.access_token is not None:
auth['bearer_test'] = {
'type': 'bearer',
'in': 'header',
'format': 'JWT',
'key': 'Authorization',
'value': 'Bearer ' + self.access_token
}
if self.signing_info is not None:
auth['http_signature_test'] = {
'type': 'http-signature',
'in': 'header',
'key': 'Authorization',
'value': None # Signature headers are calculated for every HTTP request
}
return auth
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 1.0.0\n"\
"SDK Package Version: 1.0.0".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
'url': "http://{server}.swagger.io:{port}/v2",
'description': "petstore server",
'variables': {
'server': {
'description': "No description provided",
'default_value': "petstore",
'enum_values': [
"petstore",
"qa-petstore",
"dev-petstore"
]
},
'port': {
'description': "No description provided",
'default_value': "80",
'enum_values': [
"80",
"8080"
]
}
}
},
{
'url': "https://localhost:8080/{version}",
'description': "The local server",
'variables': {
'version': {
'description': "No description provided",
'default_value': "v2",
'enum_values': [
"v1",
"v2"
]
}
}
},
{
'url': "https://127.0.0.1/no_varaible",
'description': "The local server without variables",
}
]
def get_host_from_settings(self, index, variables=None, servers=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:param servers: an array of host settings or None
:return: URL based on host settings
"""
if index is None:
return self._base_path
variables = {} if variables is None else variables
servers = self.get_host_settings() if servers is None else servers
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers)))
url = server['url']
# go through variables and replace placeholders
for variable_name, variable in server.get('variables', {}).items():
used_value = variables.get(
variable_name, variable['default_value'])
if 'enum_values' in variable \
and used_value not in variable['enum_values']:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, variables[variable_name],
variable['enum_values']))
url = url.replace("{" + variable_name + "}", used_value)
return url
@property
def host(self):
"""Return generated host."""
return self.get_host_from_settings(self.server_index, variables=self.server_variables)
@host.setter
def host(self, value):
"""Fix base path."""
self._base_path = value
self.server_index = None
| [
"[email protected]"
] | |
31a9e2a3ce29989e3919b81f3ac99a0aa1d53099 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2806/59018/240055.py | 407238dcea42d1b7be172e0c494ea3bb4c414b77 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | n=int(input())
a=[]
p=[]
for i in range(n):
m,k=input().split(' ')
    a.append(int(m))
p.append(int(k))
count=0
for j in range(n):
    count=count+a[j]*min(p[0:j+1])
print(count)
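# What this computes (added note): each item j is bought at the cheapest price
# seen so far, so the total is the sum over j of a[j] * min(p[0..j]).
# Illustrative check: amounts (2, 3) with prices (5, 4) give 2*5 + 3*4 = 22.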
| [
"[email protected]"
] | |
56ebe08be3499a23b065f9b952dadc0a413e0d4d | 93f47ba04fc18c4e537f0a48fe6232e2a89a4d30 | /examples/adspygoogle/dfp/v201408/placement_service/update_placements.py | 4a5f1ecff2f2c3ac09e479873dc7a7e90ef3069a | [
"Apache-2.0"
] | permissive | jasonshih/googleads-python-legacy-lib | c56dc52a1dab28b9de461fd5db0fcd6020b84a04 | 510fad41ecf986fe15258af64b90f99a96dc5548 | refs/heads/master | 2021-04-30T22:12:12.900275 | 2015-03-06T15:35:21 | 2015-03-06T15:35:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,538 | py | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a single placement to allow for AdSense targeting.
To determine which placements exist, run get_all_placements.py.
"""
__author__ = 'Nicholas Chen'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
PLACEMENT_ID = 'INSERT_PLACEMENT_ID_HERE'
def main(client, placement_id):
# Initialize appropriate service.
placement_service = client.GetService('PlacementService', version='v201408')
# Create query.
values = [{
'key': 'placementId',
'value': {
'xsi_type': 'NumberValue',
'value': placement_id
}
}]
query = 'WHERE id = :placementId'
statement = DfpUtils.FilterStatement(query, values, 1)
# Get placements by statement.
placements = placement_service.GetPlacementsByStatement(
statement.ToStatement())[0]
for placement in placements:
if not placement['targetingDescription']:
placement['targetingDescription'] = 'Generic description'
placement['targetingAdLocation'] = 'All images on sports pages.'
placement['targetingSiteName'] = 'http://code.google.com'
placement['isAdSenseTargetingEnabled'] = 'true'
# Update placements remotely.
placements = placement_service.UpdatePlacements(placements)
for placement in placements:
print ('Placement with id \'%s\', name \'%s\', and AdSense targeting '
'enabled \'%s\' was updated.'
% (placement['id'], placement['name'],
placement['isAdSenseTargetingEnabled']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, PLACEMENT_ID)
| [
"[email protected]"
] | |
792cf50359d4bf5956925e7c59e425b1dcb9996e | 45a00518abed3ef4796655d8d2a0677f29961aa3 | /example 46/python_venv/lib/python3.8/site-packages/pandas/tests/io/test_parquet.py | 92c86e32ed580a8e052e5a13029d3ce2b96d4624 | [] | no_license | ruiwu1990/CSCI_4710_6710 | 07b92e456d6cda3e63a5b5d078c1718110317555 | 6e32c89ef70fbe4b4a5db14682dc94b13bab6d9e | refs/heads/master | 2023-05-03T21:50:54.943702 | 2023-04-18T21:48:43 | 2023-04-18T21:48:43 | 174,882,138 | 9 | 17 | null | 2023-05-01T20:53:06 | 2019-03-10T21:18:01 | Python | UTF-8 | Python | false | false | 43,737 | py | """ test parquet compat """
import datetime
from io import BytesIO
import os
import pathlib
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._config import get_option
from pandas.compat import is_platform_windows
from pandas.compat.pyarrow import (
pa_version_under7p0,
pa_version_under8p0,
)
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.util.version import Version
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
try:
import pyarrow
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
# TODO(ArrayManager) fastparquet relies on BlockManager internals
# setup engines & skips
@pytest.fixture(
params=[
pytest.param(
"fastparquet",
marks=pytest.mark.skipif(
not _HAVE_FASTPARQUET or get_option("mode.data_manager") == "array",
reason="fastparquet is not installed or ArrayManager is used",
),
),
pytest.param(
"pyarrow",
marks=pytest.mark.skipif(
not _HAVE_PYARROW, reason="pyarrow is not installed"
),
),
]
)
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
return "pyarrow"
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
elif get_option("mode.data_manager") == "array":
pytest.skip("ArrayManager is not supported with fastparquet")
return "fastparquet"
@pytest.fixture
def df_compat():
return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
}
)
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{
"string": list("abc"),
"string_with_nan": ["a", np.nan, "c"],
"string_with_none": ["a", None, "c"],
"bytes": [b"foo", b"bar", b"baz"],
"unicode": ["foo", "bar", "baz"],
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_nan": [2.0, np.nan, 3.0],
"bool": [True, False, True],
"datetime": pd.date_range("20130101", periods=3),
"datetime_with_nat": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
}
)
@pytest.fixture(
params=[
datetime.datetime.now(datetime.timezone.utc),
datetime.datetime.now(datetime.timezone.min),
datetime.datetime.now(datetime.timezone.max),
datetime.datetime.strptime("2019-01-04T16:41:24+0200", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24+0215", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24-0200", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24-0215", "%Y-%m-%dT%H:%M:%S%z"),
]
)
def timezone_aware_date_list(request):
return request.param
def check_round_trip(
df,
engine=None,
path=None,
write_kwargs=None,
read_kwargs=None,
expected=None,
check_names=True,
check_like=False,
check_dtype=True,
repeat=2,
):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: Dataframe
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
check_names: list of str, optional
Closed set of column names to be compared
check_like: bool, optional
If True, ignore the order of index & columns.
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {"compression": None}
read_kwargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs["engine"] = engine
read_kwargs["engine"] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(
expected,
actual,
check_names=check_names,
check_like=check_like,
check_dtype=check_dtype,
)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
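# A minimal usage sketch (not part of the upstream pandas test suite); the
# frame below is an illustrative assumption and `pa` is the pyarrow fixture.
def test_check_round_trip_usage_sketch(pa):
    df = pd.DataFrame({"x": [1, 2, 3]})
    # Writes df to a temporary parquet file, reads it back (twice, per the
    # default repeat=2), and asserts the result equals the original frame.
    check_round_trip(df, pa)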
def check_partition_names(path, expected):
"""Check partitions of a parquet file are as expected.
Parameters
----------
path: str
Path of the dataset.
expected: iterable of str
Expected partition names.
"""
if pa_version_under7p0:
import pyarrow.parquet as pq
dataset = pq.ParquetDataset(path, validate_schema=False)
assert len(dataset.partitions.partition_names) == len(expected)
assert dataset.partitions.partition_names == set(expected)
else:
import pyarrow.dataset as ds
dataset = ds.dataset(path, partitioning="hive")
assert dataset.partitioning.schema.names == expected
def test_invalid_engine(df_compat):
msg = "engine must be one of 'pyarrow', 'fastparquet'"
with pytest.raises(ValueError, match=msg):
check_round_trip(df_compat, "foo", "bar")
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context("io.parquet.engine", "pyarrow"):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context("io.parquet.engine", "fastparquet"):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context("io.parquet.engine", "auto"):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "pyarrow"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "fastparquet"):
assert isinstance(get_engine("auto"), FastParquetImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "auto"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
def test_get_engine_auto_error_message():
# Expect different error messages from get_engine(engine="auto")
# if engines aren't installed vs. are installed but bad version
from pandas.compat._optional import VERSIONS
# Do we have engines installed, but a bad version of them?
pa_min_ver = VERSIONS.get("pyarrow")
fp_min_ver = VERSIONS.get("fastparquet")
have_pa_bad_version = (
False
if not _HAVE_PYARROW
else Version(pyarrow.__version__) < Version(pa_min_ver)
)
have_fp_bad_version = (
False
if not _HAVE_FASTPARQUET
else Version(fastparquet.__version__) < Version(fp_min_ver)
)
# Do we have usable engines installed?
have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version
have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version
if not have_usable_pa and not have_usable_fp:
# No usable engines found.
if have_pa_bad_version:
match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
if have_fp_bad_version:
match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa, compression=None)
result = read_parquet(path, engine=fp)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=fp, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
with catch_warnings(record=True):
result = read_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=pa, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
def check_error_on_write(self, df, engine, exc, err_msg):
# check that we are raising the exception on writing
with tm.ensure_clean() as path:
with pytest.raises(exc, match=err_msg):
to_parquet(df, path, engine, compression=None)
def check_external_error_on_write(self, df, engine, exc):
# check that an external library is raising the exception on writing
with tm.ensure_clean() as path:
with tm.external_error_raised(exc):
to_parquet(df, path, engine, compression=None)
@pytest.mark.network
@tm.network(
url=(
"https://raw.githubusercontent.com/pandas-dev/pandas/"
"main/pandas/tests/io/data/parquet/simple.parquet"
),
check_before_test=True,
)
def test_parquet_read_from_url(self, df_compat, engine):
if engine != "auto":
pytest.importorskip(engine)
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/"
"main/pandas/tests/io/data/parquet/simple.parquet"
)
df = read_parquet(url)
tm.assert_frame_equal(df, df_compat)
class TestBasic(Base):
def test_error(self, engine):
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
msg = "to_parquet only supports IO with DataFrames"
self.check_error_on_write(obj, engine, ValueError, msg)
def test_columns_dtypes(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# unicode
df.columns = ["foo", "bar"]
check_round_trip(df, engine)
@pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
def test_compression(self, engine, compression):
if compression == "snappy":
pytest.importorskip("snappy")
elif compression == "brotli":
pytest.importorskip("brotli")
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine, write_kwargs={"compression": compression})
def test_read_columns(self, engine):
# GH18154
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
expected = pd.DataFrame({"string": list("abc")})
check_round_trip(
df, engine, expected=expected, read_kwargs={"columns": ["string"]}
)
def test_write_index(self, engine):
check_names = engine != "fastparquet"
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine)
indexes = [
[2, 3, 4],
pd.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
]
# non-default index
for index in indexes:
df.index = index
if isinstance(index, pd.DatetimeIndex):
df.index = df.index._with_freq(None) # freq doesn't round-trip
check_round_trip(df, engine, check_names=check_names)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = "foo"
check_round_trip(df, engine)
def test_write_multiindex(self, pa):
# Not supported in fastparquet as of 0.1.3 or older pyarrow version
engine = pa
df = pd.DataFrame({"A": [1, 2, 3]})
index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df.index = index
check_round_trip(df, engine)
def test_multiindex_with_columns(self, pa):
engine = pa
dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS")
df = pd.DataFrame(np.random.randn(2 * len(dates), 3), columns=list("ABC"))
index1 = pd.MultiIndex.from_product(
[["Level1", "Level2"], dates], names=["level", "date"]
)
index2 = index1.copy(names=None)
for index in [index1, index2]:
df.index = index
check_round_trip(df, engine)
check_round_trip(
df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]]
)
def test_write_ignoring_index(self, engine):
# ENH 20768
# Ensure index=False omits the index from the written Parquet file.
df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]})
write_kwargs = {"compression": None, "index": False}
# Because we're dropping the index, we expect the loaded dataframe to
# have the default integer index.
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
# Ignore custom index
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["q", "r", "s"]}, index=["zyx", "wvu", "tsr"]
)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
# Ignore multi-indexes as well.
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = pd.DataFrame(
{"one": list(range(8)), "two": [-i for i in range(8)]}, index=arrays
)
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
def test_write_column_multiindex(self, engine):
# Not able to write column multi-indexes with non-string column names.
mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
if engine == "fastparquet":
self.check_error_on_write(
df, engine, TypeError, "Column name must be a string"
)
elif engine == "pyarrow":
check_round_trip(df, engine)
def test_write_column_multiindex_nonstring(self, engine):
# GH #34777
# Not able to write column multi-indexes with non-string column names
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
[1, 2, 1, 2, 1, 2, 1, 2],
]
df = pd.DataFrame(np.random.randn(8, 8), columns=arrays)
df.columns.names = ["Level1", "Level2"]
if engine == "fastparquet":
if Version(fastparquet.__version__) < Version("0.7.0"):
err = TypeError
else:
err = ValueError
self.check_error_on_write(df, engine, err, "Column name")
elif engine == "pyarrow":
check_round_trip(df, engine)
def test_write_column_multiindex_string(self, pa):
# GH #34777
# Not supported in fastparquet as of 0.1.3
engine = pa
# Write column multi-indexes with string column names
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = pd.DataFrame(np.random.randn(8, 8), columns=arrays)
df.columns.names = ["ColLevel1", "ColLevel2"]
check_round_trip(df, engine)
def test_write_column_index_string(self, pa):
# GH #34777
# Not supported in fastparquet as of 0.1.3
engine = pa
# Write column indexes with string column names
arrays = ["bar", "baz", "foo", "qux"]
df = pd.DataFrame(np.random.randn(8, 4), columns=arrays)
df.columns.name = "StringCol"
check_round_trip(df, engine)
def test_write_column_index_nonstring(self, engine):
# GH #34777
# Write column indexes with string column names
arrays = [1, 2, 3, 4]
df = pd.DataFrame(np.random.randn(8, 4), columns=arrays)
df.columns.name = "NonStringCol"
if engine == "fastparquet":
self.check_error_on_write(
df, engine, TypeError, "Column name must be a string"
)
else:
check_round_trip(df, engine)
@pytest.mark.skipif(pa_version_under7p0, reason="minimum pyarrow not installed")
def test_dtype_backend(self, engine, request):
import pyarrow.parquet as pq
if engine == "fastparquet":
# We are manually disabling fastparquet's
# nullable dtype support pending discussion
mark = pytest.mark.xfail(
reason="Fastparquet nullable dtype support is disabled"
)
request.node.add_marker(mark)
table = pyarrow.table(
{
"a": pyarrow.array([1, 2, 3, None], "int64"),
"b": pyarrow.array([1, 2, 3, None], "uint8"),
"c": pyarrow.array(["a", "b", "c", None]),
"d": pyarrow.array([True, False, True, None]),
# Test that nullable dtypes used even in absence of nulls
"e": pyarrow.array([1, 2, 3, 4], "int64"),
# GH 45694
"f": pyarrow.array([1.0, 2.0, 3.0, None], "float32"),
"g": pyarrow.array([1.0, 2.0, 3.0, None], "float64"),
}
)
with tm.ensure_clean() as path:
# write manually with pyarrow to write integers
pq.write_table(table, path)
result1 = read_parquet(path, engine=engine)
result2 = read_parquet(path, engine=engine, dtype_backend="numpy_nullable")
assert result1["a"].dtype == np.dtype("float64")
expected = pd.DataFrame(
{
"a": pd.array([1, 2, 3, None], dtype="Int64"),
"b": pd.array([1, 2, 3, None], dtype="UInt8"),
"c": pd.array(["a", "b", "c", None], dtype="string"),
"d": pd.array([True, False, True, None], dtype="boolean"),
"e": pd.array([1, 2, 3, 4], dtype="Int64"),
"f": pd.array([1.0, 2.0, 3.0, None], dtype="Float32"),
"g": pd.array([1.0, 2.0, 3.0, None], dtype="Float64"),
}
)
if engine == "fastparquet":
# Fastparquet doesn't support string columns yet
# Only int and boolean
result2 = result2.drop("c", axis=1)
expected = expected.drop("c", axis=1)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"dtype",
[
"Int64",
"UInt8",
"boolean",
"object",
"datetime64[ns, UTC]",
"float",
"period[D]",
"Float64",
"string",
],
)
def test_read_empty_array(self, pa, dtype):
# GH #41241
df = pd.DataFrame(
{
"value": pd.array([], dtype=dtype),
}
)
# GH 45694
expected = None
if dtype == "float":
expected = pd.DataFrame(
{
"value": pd.array([], dtype="Float64"),
}
)
check_round_trip(
df, pa, read_kwargs={"dtype_backend": "numpy_nullable"}, expected=expected
)
class TestParquetPyArrow(Base):
def test_basic(self, pa, df_full):
df = df_full
# additional supported types for pyarrow
dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
dti = dti._with_freq(None) # freq doesn't round-trip
df["datetime_tz"] = dti
df["bool_with_none"] = [True, None, True]
check_round_trip(df, pa)
def test_basic_subset_columns(self, pa, df_full):
# GH18628
df = df_full
# additional supported types for pyarrow
df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
check_round_trip(
df,
pa,
expected=df[["string", "int"]],
read_kwargs={"columns": ["string", "int"]},
)
def test_to_bytes_without_path_or_buf_provided(self, pa, df_full):
# GH 37105
buf_bytes = df_full.to_parquet(engine=pa)
assert isinstance(buf_bytes, bytes)
buf_stream = BytesIO(buf_bytes)
res = read_parquet(buf_stream)
tm.assert_frame_equal(df_full, res)
def test_duplicate_columns(self, pa):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_error_on_write(df, pa, ValueError, "Duplicate column names found")
def test_timedelta(self, pa):
df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)})
if pa_version_under8p0:
self.check_external_error_on_write(df, pa, NotImplementedError)
else:
check_round_trip(df, pa)
def test_unsupported(self, pa):
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_external_error_on_write(df, pa, pyarrow.ArrowException)
def test_unsupported_float16(self, pa):
# #44847, #44914
# Not able to write float 16 column using pyarrow.
data = np.arange(2, 10, dtype=np.float16)
df = pd.DataFrame(data=data, columns=["fp16"])
self.check_external_error_on_write(df, pa, pyarrow.ArrowException)
@pytest.mark.xfail(
is_platform_windows(),
reason=(
"PyArrow does not cleanup of partial files dumps when unsupported "
"dtypes are passed to_parquet function in windows"
),
)
@pytest.mark.parametrize("path_type", [str, pathlib.Path])
def test_unsupported_float16_cleanup(self, pa, path_type):
# #44847, #44914
# Not able to write float 16 column using pyarrow.
# Tests cleanup by pyarrow in case of an error
data = np.arange(2, 10, dtype=np.float16)
df = pd.DataFrame(data=data, columns=["fp16"])
with tm.ensure_clean() as path_str:
path = path_type(path_str)
with tm.external_error_raised(pyarrow.ArrowException):
df.to_parquet(path=path, engine=pa)
assert not os.path.isfile(path)
def test_categorical(self, pa):
# supported in >= 0.7.0
df = pd.DataFrame()
df["a"] = pd.Categorical(list("abcdef"))
# test for null, out-of-order values, and unobserved category
df["b"] = pd.Categorical(
["bar", "foo", "foo", "bar", None, "bar"],
dtype=pd.CategoricalDtype(["foo", "bar", "baz"]),
)
# test for ordered flag
df["c"] = pd.Categorical(
["a", "b", "c", "a", "c", "b"], categories=["b", "c", "d"], ordered=True
)
check_round_trip(df, pa)
@pytest.mark.single_cpu
def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so):
s3fs = pytest.importorskip("s3fs")
s3 = s3fs.S3FileSystem(**s3so)
kw = {"filesystem": s3}
check_round_trip(
df_compat,
pa,
path="pandas-test/pyarrow.parquet",
read_kwargs=kw,
write_kwargs=kw,
)
@pytest.mark.single_cpu
def test_s3_roundtrip(self, df_compat, s3_resource, pa, s3so):
# GH #19134
s3so = {"storage_options": s3so}
check_round_trip(
df_compat,
pa,
path="s3://pandas-test/pyarrow.parquet",
read_kwargs=s3so,
write_kwargs=s3so,
)
@pytest.mark.single_cpu
@td.skip_if_no("s3fs") # also requires flask
@pytest.mark.parametrize(
"partition_col",
[
["A"],
[],
],
)
def test_s3_roundtrip_for_dir(
self, df_compat, s3_resource, pa, partition_col, s3so
):
# GH #26388
expected_df = df_compat.copy()
# GH #35791
if partition_col:
expected_df = expected_df.astype(dict.fromkeys(partition_col, np.int32))
partition_col_type = "category"
expected_df[partition_col] = expected_df[partition_col].astype(
partition_col_type
)
check_round_trip(
df_compat,
pa,
expected=expected_df,
path="s3://pandas-test/parquet_dir",
read_kwargs={"storage_options": s3so},
write_kwargs={
"partition_cols": partition_col,
"compression": None,
"storage_options": s3so,
},
check_like=True,
repeat=1,
)
@td.skip_if_no("pyarrow")
def test_read_file_like_obj_support(self, df_compat):
buffer = BytesIO()
df_compat.to_parquet(buffer)
df_from_buf = read_parquet(buffer)
tm.assert_frame_equal(df_compat, df_from_buf)
@td.skip_if_no("pyarrow")
def test_expand_user(self, df_compat, monkeypatch):
monkeypatch.setenv("HOME", "TestingUser")
monkeypatch.setenv("USERPROFILE", "TestingUser")
with pytest.raises(OSError, match=r".*TestingUser.*"):
read_parquet("~/file.parquet")
with pytest.raises(OSError, match=r".*TestingUser.*"):
df_compat.to_parquet("~/file.parquet")
def test_partition_cols_supported(self, tmp_path, pa, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
df.to_parquet(tmp_path, partition_cols=partition_cols, compression=None)
check_partition_names(tmp_path, partition_cols)
assert read_parquet(tmp_path).shape == df.shape
def test_partition_cols_string(self, tmp_path, pa, df_full):
# GH #27117
partition_cols = "bool"
partition_cols_list = [partition_cols]
df = df_full
df.to_parquet(tmp_path, partition_cols=partition_cols, compression=None)
check_partition_names(tmp_path, partition_cols_list)
assert read_parquet(tmp_path).shape == df.shape
@pytest.mark.parametrize(
"path_type", [str, lambda x: x], ids=["string", "pathlib.Path"]
)
def test_partition_cols_pathlib(self, tmp_path, pa, df_compat, path_type):
# GH 35902
partition_cols = "B"
partition_cols_list = [partition_cols]
df = df_compat
path = path_type(tmp_path)
df.to_parquet(path, partition_cols=partition_cols_list)
assert read_parquet(path).shape == df.shape
def test_empty_dataframe(self, pa):
# GH #27339
df = pd.DataFrame(index=[], columns=[])
check_round_trip(df, pa)
def test_write_with_schema(self, pa):
import pyarrow
df = pd.DataFrame({"x": [0, 1]})
schema = pyarrow.schema([pyarrow.field("x", type=pyarrow.bool_())])
out_df = df.astype(bool)
check_round_trip(df, pa, write_kwargs={"schema": schema}, expected=out_df)
@td.skip_if_no("pyarrow")
def test_additional_extension_arrays(self, pa):
# test additional ExtensionArrays that are supported through the
# __arrow_array__ protocol
df = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype="Int64"),
"b": pd.Series([1, 2, 3], dtype="UInt32"),
"c": pd.Series(["a", None, "c"], dtype="string"),
}
)
check_round_trip(df, pa)
df = pd.DataFrame({"a": pd.Series([1, 2, 3, None], dtype="Int64")})
check_round_trip(df, pa)
@td.skip_if_no("pyarrow")
def test_pyarrow_backed_string_array(self, pa, string_storage):
# test ArrowStringArray supported through the __arrow_array__ protocol
df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="string[pyarrow]")})
with pd.option_context("string_storage", string_storage):
check_round_trip(df, pa, expected=df.astype(f"string[{string_storage}]"))
@td.skip_if_no("pyarrow")
def test_additional_extension_types(self, pa):
# test additional ExtensionArrays that are supported through the
# __arrow_array__ protocol + by defining a custom ExtensionType
df = pd.DataFrame(
{
"c": pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]),
"d": pd.period_range("2012-01-01", periods=3, freq="D"),
# GH-45881 issue with interval with datetime64[ns] subtype
"e": pd.IntervalIndex.from_breaks(
pd.date_range("2012-01-01", periods=4, freq="D")
),
}
)
check_round_trip(df, pa)
def test_timestamp_nanoseconds(self, pa):
# with version 2.6, pyarrow defaults to writing the nanoseconds, so
# this should work without error
# Note in previous pyarrows(<7.0.0), only the pseudo-version 2.0 was available
if not pa_version_under7p0:
ver = "2.6"
else:
ver = "2.0"
df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1n", periods=10)})
check_round_trip(df, pa, write_kwargs={"version": ver})
def test_timezone_aware_index(self, request, pa, timezone_aware_date_list):
if (
not pa_version_under7p0
and timezone_aware_date_list.tzinfo != datetime.timezone.utc
):
request.node.add_marker(
pytest.mark.xfail(
reason="temporary skip this test until it is properly resolved: "
"https://github.com/pandas-dev/pandas/issues/37286"
)
)
idx = 5 * [timezone_aware_date_list]
df = pd.DataFrame(index=idx, data={"index_as_col": idx})
# see gh-36004
# compare time(zone) values only, skip their class:
# pyarrow always creates fixed offset timezones using pytz.FixedOffset()
# even if it was datetime.timezone() originally
#
# technically they are the same:
# they both implement datetime.tzinfo
# they both wrap datetime.timedelta()
# this use-case sets the resolution to 1 minute
check_round_trip(df, pa, check_dtype=False)
@td.skip_if_no("pyarrow")
def test_filter_row_groups(self, pa):
# https://github.com/pandas-dev/pandas/issues/26551
df = pd.DataFrame({"a": list(range(0, 3))})
with tm.ensure_clean() as path:
df.to_parquet(path, pa)
result = read_parquet(
path, pa, filters=[("a", "==", 0)], use_legacy_dataset=False
)
assert len(result) == 1
def test_read_parquet_manager(self, pa, using_array_manager):
# ensure that read_parquet honors the pandas.options.mode.data_manager option
df = pd.DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
with tm.ensure_clean() as path:
df.to_parquet(path, pa)
result = read_parquet(path, pa)
if using_array_manager:
assert isinstance(result._mgr, pd.core.internals.ArrayManager)
else:
assert isinstance(result._mgr, pd.core.internals.BlockManager)
def test_read_dtype_backend_pyarrow_config(self, pa, df_full):
import pyarrow
df = df_full
# additional supported types for pyarrow
dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
dti = dti._with_freq(None) # freq doesn't round-trip
df["datetime_tz"] = dti
df["bool_with_none"] = [True, None, True]
pa_table = pyarrow.Table.from_pandas(df)
expected = pa_table.to_pandas(types_mapper=pd.ArrowDtype)
# pyarrow infers datetimes as us instead of ns
expected["datetime"] = expected["datetime"].astype("timestamp[us][pyarrow]")
expected["datetime_with_nat"] = expected["datetime_with_nat"].astype(
"timestamp[us][pyarrow]"
)
expected["datetime_tz"] = expected["datetime_tz"].astype(
pd.ArrowDtype(pyarrow.timestamp(unit="us", tz="Europe/Brussels"))
)
check_round_trip(
df,
engine=pa,
read_kwargs={"dtype_backend": "pyarrow"},
expected=expected,
)
def test_read_dtype_backend_pyarrow_config_index(self, pa):
df = pd.DataFrame(
{"a": [1, 2]}, index=pd.Index([3, 4], name="test"), dtype="int64[pyarrow]"
)
expected = df.copy()
import pyarrow
if Version(pyarrow.__version__) > Version("11.0.0"):
expected.index = expected.index.astype("int64[pyarrow]")
check_round_trip(
df,
engine=pa,
read_kwargs={"dtype_backend": "pyarrow"},
expected=expected,
)
def test_columns_dtypes_not_invalid(self, pa):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# numeric
df.columns = [0, 1]
check_round_trip(df, pa)
# bytes
df.columns = [b"foo", b"bar"]
with pytest.raises(NotImplementedError, match="|S3"):
# Bytes fails on read_parquet
check_round_trip(df, pa)
# python object
df.columns = [
datetime.datetime(2011, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 1, 1),
]
check_round_trip(df, pa)
def test_empty_columns(self, pa):
# GH 52034
df = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name"))
check_round_trip(df, pa)
class TestParquetFastParquet(Base):
def test_basic(self, fp, df_full):
df = df_full
dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
dti = dti._with_freq(None) # freq doesn't round-trip
df["datetime_tz"] = dti
df["timedelta"] = pd.timedelta_range("1 day", periods=3)
check_round_trip(df, fp)
def test_columns_dtypes_invalid(self, fp):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
err = TypeError
msg = "Column name must be a string"
# numeric
df.columns = [0, 1]
self.check_error_on_write(df, fp, err, msg)
# bytes
df.columns = [b"foo", b"bar"]
self.check_error_on_write(df, fp, err, msg)
# python object
df.columns = [
datetime.datetime(2011, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 1, 1),
]
self.check_error_on_write(df, fp, err, msg)
def test_duplicate_columns(self, fp):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
msg = "Cannot create parquet dataset with duplicate column names"
self.check_error_on_write(df, fp, ValueError, msg)
def test_bool_with_none(self, fp):
df = pd.DataFrame({"a": [True, None, False]})
expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16")
# Fastparquet bug in 0.7.1 makes it so that this dtype becomes
# float64
check_round_trip(df, fp, expected=expected, check_dtype=False)
def test_unsupported(self, fp):
# period
df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
# error from fastparquet -> don't check exact error message
self.check_error_on_write(df, fp, ValueError, None)
# mixed
df = pd.DataFrame({"a": ["a", 1, 2.0]})
msg = "Can't infer object conversion type"
self.check_error_on_write(df, fp, ValueError, msg)
def test_categorical(self, fp):
df = pd.DataFrame({"a": pd.Categorical(list("abc"))})
check_round_trip(df, fp)
def test_filter_row_groups(self, fp):
d = {"a": list(range(0, 3))}
df = pd.DataFrame(d)
with tm.ensure_clean() as path:
df.to_parquet(path, fp, compression=None, row_group_offsets=1)
result = read_parquet(path, fp, filters=[("a", "==", 0)])
assert len(result) == 1
@pytest.mark.single_cpu
def test_s3_roundtrip(self, df_compat, s3_resource, fp, s3so):
# GH #19134
check_round_trip(
df_compat,
fp,
path="s3://pandas-test/fastparquet.parquet",
read_kwargs={"storage_options": s3so},
write_kwargs={"compression": None, "storage_options": s3so},
)
def test_partition_cols_supported(self, tmp_path, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
df.to_parquet(
tmp_path,
engine="fastparquet",
partition_cols=partition_cols,
compression=None,
)
assert os.path.exists(tmp_path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats
assert len(actual_partition_cols) == 2
def test_partition_cols_string(self, tmp_path, fp, df_full):
# GH #27117
partition_cols = "bool"
df = df_full
df.to_parquet(
tmp_path,
engine="fastparquet",
partition_cols=partition_cols,
compression=None,
)
assert os.path.exists(tmp_path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats
assert len(actual_partition_cols) == 1
def test_partition_on_supported(self, tmp_path, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
df.to_parquet(
tmp_path,
engine="fastparquet",
compression=None,
partition_on=partition_cols,
)
assert os.path.exists(tmp_path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats
assert len(actual_partition_cols) == 2
def test_error_on_using_partition_cols_and_partition_on(
self, tmp_path, fp, df_full
):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
msg = (
"Cannot use both partition_on and partition_cols. Use partition_cols for "
"partitioning data"
)
with pytest.raises(ValueError, match=msg):
df.to_parquet(
tmp_path,
engine="fastparquet",
compression=None,
partition_on=partition_cols,
partition_cols=partition_cols,
)
def test_empty_dataframe(self, fp):
# GH #27339
df = pd.DataFrame(index=[], columns=[])
expected = df.copy()
expected.index.name = "index"
check_round_trip(df, fp, expected=expected)
def test_timezone_aware_index(self, fp, timezone_aware_date_list):
idx = 5 * [timezone_aware_date_list]
df = pd.DataFrame(index=idx, data={"index_as_col": idx})
expected = df.copy()
expected.index.name = "index"
check_round_trip(df, fp, expected=expected)
def test_use_nullable_dtypes_not_supported(self, fp):
df = pd.DataFrame({"a": [1, 2]})
with tm.ensure_clean() as path:
df.to_parquet(path)
with pytest.raises(ValueError, match="not supported for the fastparquet"):
with tm.assert_produces_warning(FutureWarning):
read_parquet(path, engine="fastparquet", use_nullable_dtypes=True)
with pytest.raises(ValueError, match="not supported for the fastparquet"):
read_parquet(path, engine="fastparquet", dtype_backend="pyarrow")
def test_close_file_handle_on_read_error(self):
with tm.ensure_clean("test.parquet") as path:
pathlib.Path(path).write_bytes(b"breakit")
with pytest.raises(Exception, match=""): # Not important which exception
read_parquet(path, engine="fastparquet")
# The next line raises an error on Windows if the file is still open
pathlib.Path(path).unlink(missing_ok=False)
def test_bytes_file_name(self, engine):
# GH#48944
df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
with tm.ensure_clean("test.parquet") as path:
with open(path.encode(), "wb") as f:
df.to_parquet(f)
result = read_parquet(path, engine=engine)
tm.assert_frame_equal(result, df)
def test_invalid_dtype_backend(self, engine):
msg = (
"dtype_backend numpy is invalid, only 'numpy_nullable' and "
"'pyarrow' are allowed."
)
df = pd.DataFrame({"int": list(range(1, 4))})
with tm.ensure_clean("tmp.parquet") as path:
df.to_parquet(path)
with pytest.raises(ValueError, match=msg):
read_parquet(path, dtype_backend="numpy")
def test_empty_columns(self, fp):
# GH 52034
df = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name"))
expected = pd.DataFrame(
columns=pd.Index([], dtype=object),
index=pd.Index(["a", "b", "c"], name="custom name"),
)
check_round_trip(df, fp, expected=expected)
| [
"[email protected]"
] | |
2b7ddd09aeac0d38f1ee11ee873acf5ee1e7cb06 | 8a52e36a792e3d22e9ae70a6b261e5fa26195c38 | /wandoujia/spiders/qianliyanSpider.py | d3c9987b193664c9bc92e33fb6e31524d66f5f03 | [] | no_license | beautifulmistake/Whole-app | 77d55a3ced0d43fc5c670d2e86e0e22467df2ca4 | cee208bd67fdddb87f4e58ee22466d722f7adc05 | refs/heads/master | 2020-05-31T07:25:28.080565 | 2019-06-04T08:56:44 | 2019-06-04T08:56:44 | 190,165,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | from urllib.parse import urljoin
import scrapy
from wandoujia.items import QianLiYanItem
class QianLiYanSpider(scrapy.Spider):
name = "QianLiYan"
def start_requests(self):
"""
        Read the URL seed file to get the initial URLs
:return:
"""
with open(r'G:\工作\APP\wandoujia\urls.txt', 'r', encoding='utf-8') as f:
lines = f.readlines()
            for url in lines[:1]:  # note: only the first URL in the file is used here
                # issue the request
                yield scrapy.Request(url=url.strip())
def parse(self, response):
"""
        Parse the listing page to collect the detail-page links
:param response:
:return:
"""
        # the current URL
        curr_url = response.url
        # the base URL (scheme + host) used to join relative links
        base = "/".join(curr_url.split("/")[:3])
        # collect the list of detail-page links
detail_urls = response.xpath('//div[@id="main"]/table[@id="category_table"]/'
'tbody/tr/td[2]/div/a/@href').extract()
        # text of the last pagination link ("下一页" means "next page")
        is_next = response.xpath('//table[@id="category_table"]/tfoot//a[last()]/text()').extract_first()
        # request each detail page
for detail_url in detail_urls:
url = urljoin(base, detail_url)
            print("Fetched URL:", url)
yield scrapy.Request(url=url, callback=self.parse_detail)
        if is_next == "下一页":  # literal site text for "next page"; kept as-is because it is matched against the page
            # get the link to the next page
next = response.xpath('//table[@id="category_table"]/tfoot//a[last()]/@href').extract_first()
yield scrapy.Request(url=urljoin(base, next), callback=self.parse)
def parse_detail(self, response):
"""
        Parse the detail page
:param response:
:return:
"""
        # create the item
        item = QianLiYanItem()
        # search title
        search_title = response.xpath('//div[@id="main"]/div/article/h1/text()').extract_first()
        # service area ------> list
        service_area = response.xpath('//div[@id="main"]/div/article/section[1]'
                                      '/ul/li[1]/descendant-or-self::text()').extract()
        # contact person ------> list
        contact = response.xpath('//div[@id="main"]/div/article/section[1]'
                                 '/ul/li[2]/descendant-or-self::text()').extract()
        # contact's mobile number (rendered as an image)
        image_urls = response.xpath('//div[@id="main"]/div/article/section[1]'
                                    '/ul/li[3]/span[2]/img/@src').extract_first()
        # contact's QQ
        contact_qq = response.xpath('//div[@id="main"]/div/article/section[1]'
                                    '/ul/li[4]/descendant-or-self::text()').extract()
        # contact's email
contact_email = response.xpath('//div[@id="main"]/div/article/section[1]'
'/ul/li[5]/descendant-or-self::text()').extract()
item['search_title'] = search_title
item['service_area'] = service_area
item['contact'] = contact
item['image_urls'] = image_urls
item['contact_qq'] = contact_qq
item['contact_email'] = contact_email
yield item
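# Usage sketch (assumption: this spider lives in a standard Scrapy project and
# the hard-coded urls.txt path above exists). From the project root, run:
#     scrapy crawl QianLiYan -o qianliyan_items.json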
| [
"[email protected]"
] | |
6edb3d2cddfae5ec7c09ce1b3da816f9fdc28865 | 7680dbfce22b31835107403514f1489a8afcf3df | /Teoria/teoria__lista_4.py | f7bb19bdeb60df58bd01289c6834859a4cb774a9 | [] | no_license | EstephanoBartenski/Aprendendo_Python | c0022d545af00c14e6778f6a80f666de31a7659e | 69b4c2e07511a0bd91ac19df59aa9dafdf28fda3 | refs/heads/master | 2022-11-27T17:14:00.949163 | 2020-08-03T22:11:19 | 2020-08-03T22:11:19 | 284,564,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | teste = list()
teste.append('Gustavo')
teste.append(40)
print(teste)
galera = list()
# galera.append(teste)  THIS IS NOT ALLOWED (it would append a reference, not a copy)
galera.append(teste[:])
teste[0] = 'Maria'
teste[1] = 22
galera.append(teste[:])
print(galera)
print()
pessoal = [['Joana', 33], ['Marcelo', 45], ['Otávio', 50], ['Jorge', 12], ['Bárbara', 14]]
for p in pessoal:
    print(f'{p[0]} is {p[1]} years old.')
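# Illustrative snippet (not in the original lesson): why the slice copies above
# matter. Plain assignment stores a reference; a slice creates a new list.
a = [1, 2]
b = a          # same object: mutating a also changes b
a[0] = 9
print(b)       # [9, 2]
a = [1, 2]
c = a[:]       # shallow copy: c keeps its own elements
a[0] = 9
print(c)       # [1, 2]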
| [
"[email protected]"
] | |
89796e8c58eacdcc25209c3a9e140a8165b53512 | a0830282305fcb38bee60c8b0fbce031c9934ff8 | /app.py | 4ca940ec58e8869e34de864c8a3ca9e23cb68456 | [] | no_license | sailorlg/DemoFlask | 0fdb7effc233e9b6a58b48d938818d766dc4b458 | 63590f0592ee0bd7a928107961a914b1eb1c8a1c | refs/heads/master | 2023-08-23T19:32:27.186166 | 2021-09-27T05:33:24 | 2021-09-27T05:33:24 | 404,283,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,963 | py | #
# import os
#
# from flask import Flask, url_for, request, redirect, make_response, json, jsonify, session, abort, g
# from urllib.parse import urlparse, urljoin
# import click
#
# from ConfigDemo import ConfigDemo
#
# app = Flask(__name__)  # create the Flask app
# app = ConfigDemo(app).app  # load the global configuration variables
#
#
# @app.route("/")
# def index():
# return "<H1>Hello, World!</H1>"
#
# @app.route("/hi", methods=['GET', 'POST'])
# @app.route("/hello", methods=['GET', 'POST'])
# def say_hello():
# """
#     Function: get the value of the "name" parameter passed from the browser and display it;
#     fall back to a value read from the cookie
#     and read the login state from the session
# :return:
# """
# print(request)
# print(request.args)
#
# # v_name = request.args.get('name')
#     # if v_name is None:  # if the browser did not send a value for "name"
#     #     v_name = "Nobody"
#     # v_name = request.args.get('name', 'Nobody')  # equivalent to the code above
#
#     v_name = request.args.get('name')  # get the "name" value from the browser's URL
# if v_name is None:
# v_name = request.cookies.get('name', 'COOKIE')
#
# response = "<H1>Say Hello to {}!</H1>".format(v_name)
#
#     # return different content depending on the user's authentication state
# if 'logged_in' in session:
# response += '[Authenticated]'
# else:
# response += "UN-Authenticated"
# return response
#
# @app.route("/greet/", defaults={"name": "Programmer"})
# @app.route("/greet/<name>")
# def greet(name):
# return "<H1>Welcome, %s!</H1>" % name
#
# @app.route("/demoname")
# def demoname():
# v_demo_name = app.config['DEMO_NAME']
# return "<H1>I am {0}!</H1>".format(v_demo_name)
#
# @app.route("/geturl")
# def geturl():
# """
#     Function: test the url_for() function
# :return:
# """
# v_url = url_for("greet")
# v_url_external = url_for("greet", _external=True)
# return ("Greet's internal URL is {0}, external URL is {1}.".format(v_url, v_url_external))
#
# @app.cli.command()
# def get_demo_name():
# """
#     Function: test the Flask CLI command feature
#     Run "flask get-demo-name" on the command line (inside the virtualenv) to execute this command
# :return:
# """
# v_demo_name = app.config['DEMO_NAME']
# click.echo(v_demo_name)
#
# @app.route("/goback/<int:year>")
# def go_back(year):
# v_now = year
# v_atfer_year = 2021 + year
#     return "Welcome to after %d years, Now is %d." % (v_now, v_atfer_year)
#
# colors = ['blue', 'white', 'red']
# @app.route("/color/<any(%s):color>"%str(colors)[1:-1])
# def any_colors(color):
# return "<H1><font color=%s>Welcome</font></H1>"%color
#
# @app.route("/redirect")
# def redirect_goto():
# """
#     Function: test the redirect feature
# :return:
# """
# print("app.py => redirect_to ")
#
#     # Method 1
#     # return "", 302, {"Location": "HTTP://www.imarketchina.net"}
#
#     # Method 2
#     return redirect("http://bbs.fengniao.com")
#
#     # Method 3
# # return redirect(url_for("say_hello"))
#
# @app.route("/contenttype/<type>")
# def return_requested_type(type):
# """
#     Function: test response formats
#     and test cookies
# :param type:
# :return:
# """
#
# if type.upper() == "JSON":
#         # Method 1
# # v_data = {"name":"ZhangSan",
# # "Job": "Student"}
# # response = make_response(json.dumps(v_data))
# # response.mimetype = "application/json"
# # return response
# v_data = {"name":"ZhangSan",
# "Job": "Student"}
# response = jsonify(v_data)
# elif type.upper() == "HTML":
# v_data = "<!DOCTYPE html>" \
# "<html>" \
# "<head></head>" \
# "<body>" \
# " <H1>Note</H1>" \
# " <p>To: Jane</p>" \
# " <p><font color=red>Content: I LOVE YOU</font></p>" \
# "</body>" \
# "</html>"
# response = make_response(v_data)
# response.mimetype = "text/html"
#
# else:
# response = jsonify(message="HTML or JSON"), 500
#
# response.set_cookie('name', type)
#
# return response
#
# @app.route("/set/<name>")
# def set_cookie(name):
# response = make_response(redirect(url_for("say_hello")))
# response.set_cookie("name", name)
# return response
#
# @app.route("/login")
# def login():
# """
#     Function: set a session value
#     :return:
#     """
#     session['logged_in'] = True   # write to the session
# return redirect(url_for('say_hello'))
#
#
# @app.route("/admin")
# def admin():
# if 'logged_in' not in session:
#         abort(403)  # abort expects an integer status code
# return "<H1>WELCOME</H1>"
#
# @app.route("/logout")
# def logout():
# if 'logged_in' in session:
# session.pop('logged_in')
# return redirect(url_for('say_hello'))
#
# @app.route("/goods")
# def goods_page():
# return "<H1>Goods Page</H1><a href='%s'> Do Something</a>" % \
# url_for('do_something', next=request.full_path)
#
# @app.route("/orders")
# def orders_page():
# return "<H1>Orders Page</H1><a href='%s'> Do Something</a>" % \
# url_for('do_something', next=request.full_path)
#
# @app.route("/do-something")
# def do_something():
# """
#     Function: test the redirect feature
# :return:
# """
# # return redirect(url_for("say_hello"))
# print("Now, going back - step 01")
# return redirect_goback()
#
# def redirect_goback(default_page='demoname', **kwargs):
# print("Now, going back - step 02")
# for target in request.args.get("next"), request.referrer:
# print("redirect_goback => ", target)
# if not target:
# print("redirect_goback => continue")
# continue
# if is_safe_url(target):
# return redirect(target)
# return redirect(url_for(default_page, **kwargs))
#
# @app.route("/logout")
# def log_out_function():
# """
#     Function: test the logout feature
# :return:
# """
# if "logged_in" in session:
# session.pop("logged_in")
# return redirect(url_for("say_hello"))
#
# @app.before_request
# def get_name():
# g.name = request.args.get('name')
# print(g.name)
#
# def is_safe_url(target):
# """
#     Function: check whether the redirect target URL is safe.
# :param target:
# :return:
# """
# print("is_safe_url => target : ", target)
# rer_url = urlparse(request.host_url)
# print("is_safe_url => rer_url : ", rer_url)
# test_url = urlparse(urljoin(request.host_url, target))
# print("is_safe_url => test_url: ", test_url)
# return test_url.scheme in ("http", "https") and rer_url.netloc == test_url.netloc
import os
from jinja2.utils import generate_lorem_ipsum, escape
from flask import Flask, url_for, request, redirect, make_response, json, jsonify, session, abort, g, \
render_template, Markup, flash, send_from_directory
from urllib.parse import urlparse, urljoin
# Chapter: 4.4.4_4
from flask_wtf.csrf import validate_csrf  # validate the CSRF token
from wtforms import ValidationError
from flask_ckeditor import CKEditor    # import CKEditor, Chapter 4.4.5
import click
import uuid
from form.forms import LoginForm, AskAgeForm, AskNameForm, UploadSingleImageForm, \
    UploadMultiImageForm, IntroducePictureForm   # import the form classes from form/forms.py
from ConfigDemo import ConfigDemo
app = Flask(__name__)   # create the Flask app
app = ConfigDemo(app).app  # load the global configuration variables
ckeditor = CKEditor(app)   # instantiate the CKEditor class provided by Flask-CKEditor
@app.route("/post")
def show_post():
"""
    Function: demonstrate partial page loading via Ajax
    :return:
    """
    v_post_body = generate_lorem_ipsum(n=2)   # randomly generate 2 paragraphs of text
return """
    <H1>A very long post</H1>
<div class="body">%s</div>
<button id="load">Load More ...</button>
<script src="https://code.jquery.com/jquery-3.3.1.min.js"></script>
<script type="text/javascript">
$(function() {
$('#load').click(function() {
$.ajax({
                url:'/more',    // target URL
                type: 'get',    // request method
                success: function(data){    // callback fired on a 2xx response
                    $('.body').append(data);    // append the returned response to the page
}
})
})
})
</script>
""" % v_post_body
@app.route("/more")
def load_post():
"""
    Function: provide the extra content loaded when show_post() requests more via Ajax
:return:
"""
print("load_post => is running......")
return generate_lorem_ipsum(n=1)
@app.route("/hello")
def hello_function():
"""
    Function: demonstrate escaping/sanitizing user input
    :return:
    """
    v_name = request.args.get('name', 'My Owner')
    # return '<H1>Hello, %s!</H1>' % v_name   # version without sanitizing
    return '<H1>Hello, %s!</H1>' % escape(v_name)  # sanitize via escape
@app.route("/watchlist")
def f_watchlist():
"""
    Function: demonstrate template usage
:return:
"""
v_user = {"username": "ZhangSan",
"bio": "A boy only"}
v_movies = [
{"name": "My Neighbor Totoro", "year": "1988"},
{"name": "Three Colors Trilogy", "year": "1993"},
{"name": "Perfect Blue", "year": "1997"},
{"name": "The Bucket List", "year": "2007"},
{"name": "CoCo", "year": "2017"},
]
return render_template('watchlist.html', user=v_user, movies=v_movies)
@app.route("/")
def index_view():
"""
:return:
"""
print("app.py => index_view : running")
return render_template('index.html', foo="ImFoo", name="baz")
@app.template_filter()
def musicals(s):
"""
    Function: register a custom template filter
    that appends a music note to the given string
:param s:
:return:
"""
return s + Markup('♫')
@app.template_global()
def bar():
"""
    Function: register a template global
    that returns a string
:return:
"""
return "I am bar() function."
@app.template_test()
def baz(n):
"""
    Function: register a custom template test
:param n:
:return:
"""
if n == "baz":
return True
return False
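# A minimal Jinja2 usage sketch (assumption: these lines are not in the
# original templates) showing how the filter, global, and test registered
# above would be used, e.g. inside templates/index.html:
#     {{ "Blues"|musicals }}                        {# filter: appends a music note #}
#     {{ bar() }}                                   {# template global #}
#     {% if name is baz %}name is "baz"{% endif %}  {# custom test #}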
@app.route("/flash")
def display_flash():
"""
    Function: set flash messages, then redirect to the index page,
    where the flash messages are read and displayed
:return:
"""
flash("I am flash, Only Flash!")
flash("Second Flash!")
return redirect(url_for("index_view"))
@app.errorhandler(404)
def page_not_found(e):
return render_template('errors/404.html'), 404
@app.route("/basic", methods=['GET', 'POST'])
def basic():
print("app.py => basic : running ")
form = LoginForm() # GET + POST
print("app.py => basic : get form")
if form.validate_on_submit():
print("app.py => basic : form has been checked!")
username = form.username.data
print("app.py => basic => username : " + username)
flash("Welcome, %s" % username)
return redirect(url_for("index_view"))
return render_template('basic.html', form=form)
@app.route("/age", methods=['GET', 'POST'])
def ask_age():
"""
    Function: demonstrate a custom inline validator
    Chapter: 4.4.3_1
    :return:
    """
    form = AskAgeForm()
    if form.validate_on_submit():
        print("app.py => ask_age : running ")
return render_template('age.htm', form=form)
@app.route("/yourname", methods=['GET', 'POST'])
def ask_name():
"""
    Function: demonstrate a custom validator and a global (reusable) validator
    Chapter: 4.4.3_2
    :return:
    """
    form = AskNameForm()
    if form.validate_on_submit():
        print("app.py => ask_name : running ")
return render_template('name.html', form=form)
@app.route("/uploadimage", methods=['GET', 'POST'])
def upload_single_image():
"""
    Function: demonstrate uploading a single file (image)
    Chapter: 4.4.4_1~3
    :return:
    """
    form = UploadSingleImageForm()
    if form.validate_on_submit():
        image_file = form.image.data  # get the image file
        image_file_name = random_filename(image_file.filename)   # convert the image's name to a random filename
        print("app.py => upload_single_image => image_file_name : " + image_file_name)
        image_file.save(os.path.join(app.config['UPLOAD_PATH'], image_file_name))   # save the image file
        flash('Upload Success!')
        session['image_file_name'] = [image_file_name]  # store the image filename in the session
        return redirect(url_for("show_images"))   # redirect to the URL of the show_images view; this opens another page
return render_template("upload_one_image.html", form=form)
def random_filename(filename):
"""
    Function: file (image) upload helper
    Generate a random filename based on the uploaded file's name
:param filename:
:return:
"""
ext_name = os.path.splitext(filename)[1]
new_filename = uuid.uuid4().hex + ext_name
return new_filename
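# Illustrative behaviour (assumption: the digest shown is a placeholder; uuid4
# produces a different 32-char hex value on every call):
#     random_filename("avatar.png")  ->  "<32 hex chars>.png"
# Keeping only the extension avoids name collisions and unsafe user-supplied names.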
@app.route("/images/<path:filename>")
def get_file(filename):
"""
Function:
Chapter: 4.4.4_1~3
:param file_name:
:return:
"""
print("app.py => upload_single_image => get_file => filename : " + filename)
return send_from_directory(app.config['UPLOAD_PATH'], filename)
@app.route("/images")
def show_images():
"""
    Function: display the uploaded image
    Chapter: 4.4.4_1~3
    :return:
    """
    print("app.py => show_images : running")
    image_name = session['image_file_name']   # retrieve the image filename from the session
return render_template('show_images.html', filename=image_name, number=1)
@app.route("/uploadmoreimages", methods=['GET', 'POST'])
def upload_more_image():
"""
    Function: demonstrate uploading multiple files (images),
            a rich-text editor,
            and a single form with multiple submit buttons
    Chapter: 4.4.4_4, 4.4.5, 4.4.6
    :return:
    """
    form = UploadMultiImageForm()
    if request.method == "POST":
        if form.tempsubmit.data:    # the temporary-submit button was clicked
            print("app.py => upload_more_image : " + "received a temporary submit")
            flash("Got Temp submitted content!")
            return redirect(url_for("index_view"))
        if form.submit.data:        # the submit button was clicked
            filenames = []
            # Validate the CSRF token.
            # Pass in the form's hidden csrf_token field value; a wtforms.ValidationError means validation failed.
            try:
                validate_csrf(form.csrf_token.data)
            except ValidationError:
                flash("CSRF token error")
                return redirect(url_for("more_images"))
            # Show the content submitted through the rich-text editor.
            richtext = form.detail.data
            print(richtext)
            # Check that files are present.
            # Make sure the field contains file data; if the user submits without selecting files, request.files is empty.
            # "images" is the form field name.
            if 'images' not in request.files:
                flash("This field is required!")
                return redirect(url_for("more_images"))
            # Process the image files in a loop.
            for a_image in request.files.getlist('images'):
                # Check the file type.
                if a_image and allowed_file(a_image.filename):
                    imagename = random_filename(a_image.filename)
                    a_image.save(os.path.join(app.config['UPLOAD_PATH'], imagename))
                    filenames.append(imagename)
                else:
                    flash("Invalid file type!")
                    return redirect(url_for("more_images"))
flash("Images upload success!")
session['image_names'] = filenames
return redirect(url_for("more_images"))
return render_template("upload_more_images.html", form=form)
@app.route("/moreimages")
def more_images():
"""
    Function: display multiple images
    Chapter: 4.4.4_4
    :return:
    """
    print("app.py => more_images : running")
    image_name = session['image_names']   # retrieve the image filenames from the session
print("app.py => more_images => image_name : " + str(image_name))
return render_template('show_images.html', filename=image_name, number=len(image_name))
def allowed_file(file_name):
"""
    Function: validate the file type: the name must contain "." and its extension must be listed in the config file
:param file_name:
:return:
"""
return '.' in file_name and file_name.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']
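# Examples (assuming ALLOWED_EXTENSIONS in the config contains 'jpg' and 'png'):
#     allowed_file('photo.JPG')  ->  True   (the extension compare is lower-cased)
#     allowed_file('script.py')  ->  False  (extension not whitelisted)
#     allowed_file('noext')      ->  False  (no '.' in the name)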
@app.route("/multiform", methods=['GET', 'POST'])
def one_page_two_forms():
"""
    Function: demonstrate handling multiple forms on one page, with a single view
    Chapter: 4.4.7_1
:return:
"""
imageform = UploadMultiImageForm()
modelform = IntroducePictureForm()
    if imageform.submitimage.data:   # UploadMultiImageForm was submitted
        if imageform.tempsubmit.data:   # the temporary-submit button was clicked
            flash("Got UploadMultiImageForm Temp submitted content!")
            return redirect(url_for("index_view"))
        if imageform.submitimage.data:    # the submit button was clicked
            filenames = []
            # Validate the CSRF token.
            # Pass in the form's hidden csrf_token field value; a wtforms.ValidationError means validation failed.
try:
validate_csrf(imageform.csrf_token.data)
except ValidationError:
flash("CSRF token error")
return redirect(url_for("more_images"))
            # Show the content submitted through the rich-text editor.
richtext = imageform.detail.data
print("app.py => one_page_two_forms => richtext " + richtext)
            # Check that files are present.
            # Make sure the field contains file data; if the user submits without selecting files, request.files is empty.
            # "images" is the form field name.
            if len(imageform.images.data) <= 0:
                flash("This field is required!")
return redirect(url_for("more_images"))
print("app.py => one_page_two_forms => imageform : ", request.files.getlist('images'))
            # Process the image files in a loop.
            for a_image in request.files.getlist('images'):
                # Check the file type.
if a_image and allowed_file(a_image.filename):
imagename = random_filename(a_image.filename)
a_image.save(os.path.join(app.config['UPLOAD_PATH'], imagename))
filenames.append(imagename)
else:
flash("Invalid file type!")
return redirect(url_for("more_images"))
flash("Images upload success!")
session['image_names'] = filenames
return redirect(url_for("more_images"))
    if modelform.submitmodel.data:   # IntroducePictureForm was submitted (its submit button was clicked)
        modile_pic_list = []
        # Validate the CSRF token.
        # Pass in the form's hidden csrf_token field value; a wtforms.ValidationError means validation failed.
try:
validate_csrf(modelform.csrf_token.data)
except ValidationError:
flash("CSRF token error")
return redirect(url_for("more_images"))
print("app.py => one_page_two_forms => modelform : " + str(modelform))
print("app.py => one_page_two_forms => modelpic : ", request.files.getlist('modelpic'))
        # Check that files are present.
        # Make sure the field contains file data; if the user submits without selecting files, request.files is empty.
        # "modelpic" is the form field name.
        print("app.py => one_page_two_forms => modelform.data :", modelform.modelpic.data)
        if len(modelform.modelpic.data) <= 0:
            flash("This field is required!")
return redirect(url_for("more_images"))
        # Process the image files in a loop.
        for a_image in request.files.getlist('modelpic'):
            # Check the file type.
if a_image and allowed_file(a_image.filename):
print("app.py => one_page_two_forms => modelform => a_image.filename : " + a_image.filename)
imagename = random_filename(a_image.filename)
a_image.save(os.path.join(app.config['UPLOAD_PATH'], imagename))
modile_pic_list.append(imagename)
else:
flash("Invalid file type!")
return redirect(url_for("more_images"))
flash("Images upload success!")
print("app.py => one_page_two_forms => modile_pic_list : ", modile_pic_list)
session['image_names'] = modile_pic_list
return redirect(url_for("more_images"))
return render_template("onepage_moreforms_1.html", imageform=imageform, modelform=modelform)
@app.route("/multiformview", methods=['GET'])
def more_form_more_vew_display():
"""
    Function: demonstrate handling multiple forms on one page, with multiple views
    Handles the GET request and renders the page
    Chapter: 4.4.7_2
:return:
"""
imageform = UploadMultiImageForm()
modelform = IntroducePictureForm()
return render_template("onepage_morefors_2.html", imageform=imageform, modelform=modelform)
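# Template wiring sketch (assumption: onepage_morefors_2.html is not shown in
# this file). Each form posts to its own endpoint, so validate_on_submit()
# only fires for the form that was actually submitted:
#     <form action="{{ url_for('more_form_more_vew_pictures') }}" method="post" enctype="multipart/form-data">...</form>
#     <form action="{{ url_for('more_form_more_vew_models') }}" method="post" enctype="multipart/form-data">...</form>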
@app.route("/multiformview-pic", methods=['POST'])
def more_form_more_vew_pictures():
"""
    Function: demonstrate handling multiple forms on one page, with multiple views
    Handles the POST request of UploadMultiImageForm
    :return:
    """
    imageform = UploadMultiImageForm()
    modelform = IntroducePictureForm()
    if imageform.validate_on_submit():   # UploadMultiImageForm was submitted
        if imageform.tempsubmit.data:   # the temporary-submit button was clicked
            flash("Got UploadMultiImageForm Temp submitted content!")
            return redirect(url_for("index_view"))
        if imageform.submitimage.data:   # the submit button was clicked
            filenames = []
            # Validate the CSRF token.
            # Pass in the form's hidden csrf_token field value; a wtforms.ValidationError means validation failed.
            try:
                validate_csrf(imageform.csrf_token.data)
            except ValidationError:
                flash("CSRF token error")
                return redirect(url_for("more_images"))
            # Show the content submitted through the rich-text editor.
            richtext = imageform.detail.data
            print("app.py => more_form_more_vew_pictures => richtext " + richtext)
            # Check that files are present.
            # Make sure the field contains file data; if the user submits without selecting files, request.files is empty.
            # "images" is the form field name.
            if len(imageform.images.data) <= 0:
                flash("This field is required!")
                return redirect(url_for("more_images"))
            print("app.py => more_form_more_vew_pictures => imageform : ", request.files.getlist('images'))
            # Process the image files in a loop.
            for a_image in request.files.getlist('images'):
                # Check the file type.
                if a_image and allowed_file(a_image.filename):
                    imagename = random_filename(a_image.filename)
                    a_image.save(os.path.join(app.config['UPLOAD_PATH'], imagename))
                    filenames.append(imagename)
                else:
                    flash("Invalid file type!")
                    return redirect(url_for("more_images"))
flash("Images upload success!")
session['image_names'] = filenames
return redirect(url_for("more_images"))
@app.route("/multiformview-model", methods=['POST'])
def more_form_more_vew_models():
"""
功能: 演示一个页面处理多个表单的功能, 多个视图
IntroducePictureForm的POST请求
:return:
"""
imageform = UploadMultiImageForm()
modelform = IntroducePictureForm()
if modelform.validate_on_submit(): # IntroducePictureForm被提交, 点击了提交按钮
modile_pic_list = []
# 验证CSRF令牌
# 传入表单中csrf_token隐藏字段的值, 如果抛出wtforms.ValidationError异常,则表明验证没有通过
try:
validate_csrf(modelform.csrf_token.data)
except ValidationError:
flash("CSRF token error")
return redirect(url_for("more_images"))
print("app.py => one_page_two_forms => modelform : " + str(modelform))
print("app.py => one_page_two_forms => modelpic : ", request.files.getlist('modelpic'))
        # Check that a file was actually uploaded
        # Make sure the field contains file data; request.files is empty if the user submits the form without selecting a file
        # "modelpic" is the form field name
print("app.py => one_page_two_forms => modelform.data :", modelform.modelpic.data)
if len(modelform.modelpic.data) <= 0:
flash("This filed is required!")
return redirect(url_for("more_images"))
        # Process each uploaded image file in a loop
for a_image in request.files.getlist('modelpic'):
            # Check the file type
if a_image and allowed_file(a_image.filename):
print("app.py => one_page_two_forms => modelform => a_image.filename : " + a_image.filename)
imagename = random_filename(a_image.filename)
a_image.save(os.path.join(app.config['UPLOAD_PATH'], imagename))
modile_pic_list.append(imagename)
else:
flash("Invalid file type!")
return redirect(url_for("more_images"))
flash("Images upload success!")
print("app.py => one_page_two_forms => modile_pic_list : ", modile_pic_list)
session['image_names'] = modile_pic_list
return redirect(url_for("more_images")) | [
"[email protected]"
] | |
2cd393130f9913c27560502518c7ea7a5860ccfe | 5be29c3b055a335120186de58d8d08bab6a4de7a | /KFC_server/regist_Ui.py | 6b85c7875fb7e61f010a51f1cb06a8cd93a92729 | [] | no_license | wangredfei/projects | aabb68f45c661db4d60dc13e34d069c4fa77e423 | 3dfbf4467a10e43296a6397cebbfb2e6d78e684b | refs/heads/master | 2022-12-09T07:19:07.538950 | 2019-03-18T06:57:12 | 2019-03-18T06:57:12 | 176,221,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | from PyQt5.Qt import *
import os
class Regist_Ui(QDialog):
    '''Administrator registration dialog.'''
def newWindowUI(self, dialog):
dialog.resize(300,500)
self.dialog = dialog
self.button()
self.lineEdit()
self.background_show()
def button(self):
self.button1 = QPushButton(self.dialog)
self.button1.setGeometry(QRect(270, 10, 20, 20))
self.button1.setText("×")
self.button1.clicked.connect(self.dialog.close)
self.button2 = QPushButton(self.dialog)
self.button2.setGeometry(QRect(110, 400,80, 20))
self.button2.setText("注册")
def lineEdit(self):
self.lineEdit = QLineEdit(self.dialog)
self.lineEdit.setGeometry(75, 235, 150, 30)
self.lineEdit.setInputMask("")
self.lineEdit.setText("")
self.lineEdit.setMaxLength(17)
self.lineEdit.setFrame(True)
self.lineEdit.setEchoMode(QLineEdit.Normal)
self.lineEdit.setAlignment(Qt.AlignCenter)
self.lineEdit.setDragEnabled(False)
self.lineEdit.setClearButtonEnabled(False)
self.lineEdit.setObjectName("lineEdit")
self.lineEdit.setPlaceholderText('姓名')
self.lineEdit_1 = QLineEdit(self.dialog)
self.lineEdit_1.setGeometry(75, 275, 150, 30)
self.lineEdit_1.setInputMask("")
self.lineEdit_1.setText("")
self.lineEdit_1.setMaxLength(17)
self.lineEdit_1.setFrame(True)
self.lineEdit_1.setEchoMode(QLineEdit.Normal)
self.lineEdit_1.setAlignment(Qt.AlignCenter)
self.lineEdit_1.setDragEnabled(False)
self.lineEdit_1.setClearButtonEnabled(False)
self.lineEdit_1.setObjectName("lineEdit")
self.lineEdit_1.setPlaceholderText('手机号')
self.lineEdit_2 = QLineEdit(self.dialog)
self.lineEdit_2.setGeometry(75, 315, 150, 30)
self.lineEdit_2.setMaxLength(17)
self.lineEdit_2.setEchoMode(QLineEdit.Password)
self.lineEdit_2.setAlignment(Qt.AlignCenter)
self.lineEdit_2.setObjectName("lineEdit_2")
self.lineEdit_2.setPlaceholderText('密码')
def background_show(self):
        self.path = os.path.dirname(__file__).replace('\\', '/')
        # Set the window background image
palette1 = QPalette()
palette1.setBrush(self.dialog.backgroundRole(), QBrush(QPixmap('%s/Images/regist.jpg'%self.path)))
self.dialog.setPalette(palette1)
| [
"[email protected]"
] | |
145be8c13b53b68fae56bd1168d4c0ea048dab87 | 24a9c8f2fac4e2b20f731387336ec4e22d5fd2c7 | /project2/L河南/7、南阳市(信用中国).py | 739c30bb476a58b281d9fe6d030203723d60bec9 | [] | no_license | yunli45/pycharmProjectHome | 94833822e3036bf2baf8700c4493132e63177d4c | 9a382c060963eb801a3da07423e84a4132257b02 | refs/heads/master | 2020-05-23T23:35:20.476973 | 2019-05-16T09:45:58 | 2019-05-16T09:49:16 | 186,986,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,286 | py | # coding = utf-8
import threading
import time
from queue import Queue
import requests
import re
from bs4 import BeautifulSoup
from L河南.工具包 import 链接数据库,附件下载程序,预处理模块, 提取信息, 动态访问
class MySpider():
"""
    Parameter notes:
        headers: browser request headers
        index_url: the front page being crawled
        page_no: page number
        page_url: the URL of each page; POST URLs are mostly fixed, while GET URLs change per page
        para: the parameters a POST request needs; for GET requests just pass an empty value
        save_path: local directory where attachments are saved
        table_name: the database table the records are stored in
        source_library: where the data comes from; used to derive province and annex_local
                        (the attachment path written into the data; in the preprocessing module this is the
                        module_name argument, automatically joined as "/datafolder/" + module_name + "/")
           e.g. 河南省>信用河南   province: 河南省      annex_local: 河南省/信用河南
                            preprocessing module_name: "/datafolder/" + 河南省/信用河南 + "/"
    annex_local: the attachment path inside the record
    province: the province name
"""
def __init__(self, headers, index_url, page_no, page_url, para, request_type,save_path, table_name, source_library,annex_local, province, proxies):
self.headers = headers
self.index_url = index_url
self.page_no = page_no
self.page_url = page_url
self.request_type = request_type
self.para = para
self.save_path = save_path
self.table_name = table_name
self.source_library = source_library
self.annex_local = annex_local
self.province = province
self.proxies = proxies
    # Fetch a URL with a POST request
def get_page_post(self, url, para):
i = 0
while i < 3:
try:
response = requests.post(url, data=para, headers=self.headers)
response = response.content.decode('utf-8').replace(u'\xa0', u'')
# response = response.content.decode('GB18030')
except Exception as e:
print("[info] {0}{1}".format(e, url))
i += 1
else:
return response
    # Fetch a URL with a GET request
def get_page_get(self, url):
i = 0
while i < 3:
try:
# response = requests.get(url, proxies= self.proxies, headers=self.headers).content.decode('GB18030')
response = requests.get(url, proxies= self.proxies, headers=self.headers).content.decode('utf-8').replace(u'\xa0', u'')
# print(response)
except Exception as e:
print("[info] {0}{1}".format(e, url))
i += 1
else:
return response
def parse_page(self):
        # In order: record URL, publish (disclosure) date, penalty date, title, enforcement authority, document number, punished person, punished institution
src_list, penalty_date_list, release_date_list, title_list, law_enforcement_list, book_num_list = [], [], [], [], [], []
punished_people_list, punished_institution_list = [], []
print("# # # # # # # # # # # # # # #这是第:" + str(self.page_no) + "页 # # # # # # # # # # # # # # # # # # # #")
        # Fetch the page data
        # request_type 'get' means this page uses a GET request
if self.request_type == 'get':
response = self.get_page_get(self.page_url)
else:
response = self.get_page_post(self.page_url, self.para)
# print("页面数据" + str(response))
"""
        Extract the information from the listing page:
"""
# response = response.replace("\n", '')
        response = response.replace(u'\xa0', u'').replace("'", '')  # handle nbsp; being read by Python as garbled '?' characters
soup = BeautifulSoup(response, 'lxml')
rs = soup.find_all('tbody')
rs = str(rs).replace("\n",'').replace('\t', '').replace('\r', '').replace("'", '')
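        # Capture groups per <tr> row, matching the appends below: (0) punished
        # party, (1) title, (2) enforcement authority, (3) publish date,
        # (4) the detail-page href.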
rs_list = re.findall(r'<tr.*?>.*?<td.*?title=(.*?)>.*?</td><td.*?title=(.*?)>.*?</td><td.*?title=(.*?)>.*?</td><td.*?>(.*?)</td><td.*?><a.*?href'
r'="(.*?)".*?</a>', str(rs), flags=re.M)
# print(input())
if rs_list:
for i in rs_list:
src_list.append(i[4])
# book_num_list.append(i[1])
title_list.append(i[1])
                law_enforcement_list.append(i[2])  # enforcement authority
# penalty_date_list.append(i[3])
                release_date_list.append(i[3])  # publish (disclosure) date
                punished_people_list.append(i[0])  # punished person
                # punished_institution_list.append(0)  # punished institution
if src_list:
for ids, src in enumerate(src_list):
title = title_list[ids].replace('"', '')
# content_src = index_url + src.replace("&", '&')
content_src = self.index_url + src
print("content_src" + str(content_src))
                # data_id uniquely identifies this record (normally the string after the last slash of the request URL; here the scraped URL itself is used)
data_id = src
page_no_position = "这是第" + str(self.page_no) + "页的第" + str(ids + 1) + "条的数据"
                # Look the record up by its full request URL; if it already exists, skip it
sql_1 = "select * from 行政案例数据库.dbo.{0} where 这条数据完整的请求地址='{1}'".format(self.table_name, content_src)
                # The DB helper methods were improved: one call now returns both the connection object and a cursor for reuse
connect_cursor = 链接数据库.get_connect_cursor()
conn = connect_cursor[0]
cursor = connect_cursor[1]
                # query
data_id_rs = 链接数据库.query(cursor, sql_1)
if data_id_rs is not None:
print("这条数据在已有的数据库中已存在,现在已经paa掉了,依据的原则是这条数据完整的请求地址,标题为: " + str(title))
pass
else:
"""
                    Extract the relevant information from the detail page:
"""
rs_cont = self.get_page_get(content_src).replace("'", '"')
cont_t = 提取信息.extracting_information_table_7(rs_cont)
                    release_date = release_date_list[ids]  # disclosure date
book_num = cont_t[0].replace('"', '')
legal_person = cont_t[1].replace('"', '')
area = ''
                    penalty_date = cont_t[3]  # penalty date
cont = cont_t[4]
                    law_enforcement = law_enforcement_list[ids].replace('"', '')  # enforcement authority
                    punished_people = punished_people_list[ids].replace('"', '')  # punished person
                    punished_institution = ''  # punished institution
"""
                    Text-length tier calculation:
                     0 <= x < 100 -> -1,  100 <= x < 200 -> 1,  200 <= x < 1500 -> 2,  1500 <= x -> 0
"""
                    text_len = re.sub(r'<[^>]+>', '', str(cont), flags=re.S)  # strip HTML tags
                    text_len = re.sub('\\s*|\t|\r|\n', '', str(text_len))  # strip tabs, spaces and blank lines
                    text_len = text_len.replace(" ", '')  # remove spaces
                    text_len = re.sub('<script[^>]*?>[\\s\\S]*?<\\/script>', '', str(text_len))  # strip <script> blocks
                    text_len = re.sub('<style[^>]*?>[\\s\\S]*?<\\/style>', '', str(text_len))  # strip <style> blocks
text_len = len(text_len)
                    if 0 <= text_len < 100:
                        text_level = -1
                    elif 100 <= text_len < 200:
                        text_level = 1
                    elif 200 <= text_len < 1500:
                        text_level = 2
                    elif 1500 <= text_len:
                        text_level = 0
                    else:
                        raise Exception("Text-level error: the length is somehow negative, text length = {0}".format(text_len))
                    # Query the largest showid in the table; when the table is empty start from 13301990, then add 1 per record
sql_2 = "select max(showid) from 行政案例数据库.dbo.{0}".format(self.table_name)
                    # query
max_show_id_rs = 链接数据库.query(cursor, sql_2)
if max_show_id_rs[0] is None:
max_show_id = 13301990
else:
max_show_id = max_show_id_rs[0] + 1
                    # Insert into the database
if len(cont) < 30000:
insert_sql = "insert into 行政案例数据库.dbo.{0}(标题, 书文号, 法定代表人, 被处罚人, 被处罚单位或机构, 执法机构, 处罚时间, 这条数据请求地址, 这条数据完整的请求地址, 这条数据属于第几页的第几条, 模块首页的url, 来自于那个模块, 省份, 区域, showid, dataid, 文本内容1, 发布时间,文字等级, 文字字数) VALUES ('{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}','{9}','{10}','{11}','{12}','{13}','{14}','{15}','{16}','{17}','{18}',{19},{20}) ".format(self.table_name, title, book_num, legal_person, punished_people, punished_institution,
law_enforcement, penalty_date, content_src, content_src, page_no_position,
self.page_url, self.source_library, self.province, area, max_show_id, data_id, cont,
release_date, text_level, text_len)
print(insert_sql)
链接数据库.insert(cursor, insert_sql)
链接数据库.break_connect(conn)
def main():
# index_url = "http://www.hnep.gov.cn/ztzl/gdzt/xzxkjxzcfgs/xzcf/H6006021402index_1.htm"
index_url = "http://nyxyw.gov.cn"
# table_name = '行政案例数据表'
table_name = '行政案例测试表'
source_library = '河南省>南阳市'
annex_local = source_library.replace("<", '/').replace(">", '/')
save_path = "E:\行政案例附件\datafolder\\"+annex_local.replace("/", '\\')
province = source_library[:source_library.find(">")]
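    # NOTE: the two dict literals below each repeat a key ('User-Agent' / "HTTP");
    # in a Python dict literal only the last duplicate survives, so effectively a
    # single user agent and a single proxy are actually used.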
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10-12.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
'User-Agent': 'Mozilla/5.0 (Windows NT 10-12.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0'}
proxies = {
"HTTP": "http://183.146.179.159:9000",
"HTTP": "http://112.85.164.150:9999",
"HTTP": "http://115.151.4.237:9999"
}
    # Build all the page URLs
# for page_no in range(1, 80):
# para = ''
# page_url = index_url + "search.aspx?searchtype=0&Keyword=行政处罚&page={0}".format(page_no)
# request_type = 'get'
# MySpider(headers,index_url, page_no, page_url, para, request_type, save_path, table_name, source_library, annex_local, province, proxies).parse_page()
for page_no in range(1, 17):
page_url = index_url + 'doublePublicController/toDoublePubDatePage'
para = {
"deptModeName": "MZJ",
"licePage": "1",
"puniPage": "{0}".format(page_no),
"doubleflag": "1",
}
request_type = 'post'
MySpider(headers,index_url, page_no, page_url, para, request_type, save_path, table_name, source_library, annex_local, province, proxies).parse_page()
if __name__ == '__main__':
start = time.time()
main()
    print('[info] elapsed: %s' % (time.time() - start)) | [
"[email protected]"
] | |
2bb4211c31bd54a5ab45c2fa8bd7071fb0a2e52c | 6a95112805b64322953429270a305d01fef3faea | /dist/weewx-4.5.0a3/bin/weewx/uwxutils.py | 8ae5cb4f4f05725fc2b120d69c39329239ff64d8 | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | tomdotorg/docker-weewx | c6d59dc492a9e53f3bc898f7b9f593717092d72c | 7085654f455d39b06acc688738fde27e1f78ad1e | refs/heads/main | 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 | Apache-2.0 | 2022-10-19T23:46:26 | 2016-03-17T11:39:29 | Dockerfile | UTF-8 | Python | false | false | 25,248 | py | # Adapted for use with weewx
#
# This source code may be freely used, including for commercial purposes
# Steve Hatchett [email protected]
# http://www.softwx.org/weather
"""
Functions for performing various weather related calculations.
Notes about pressure
Sensor Pressure raw pressure indicated by the barometer instrument
Station Pressure Sensor Pressure adjusted for any difference between
sensor elevation and official station elevation
Field Pressure (QFE) Usually the same as Station Pressure
Altimeter Setting (QNH) Station Pressure adjusted for elevation (assumes
standard atmosphere)
Sea Level Pressure (QFF) Station Pressure adjusted for elevation,
temperature and humidity
Notes about input parameters:
currentTemp - current instantaneous station temperature
meanTemp - average of current temp and the temperature 12 hours in
the past. If the 12 hour temp is not known, simply pass
the same value as currentTemp for the mean temp.
humidity - Value should be 0 to 100. For the pressure conversion
functions, pass a value of zero if you do not want to
the algorithm to include the humidity correction factor
in the calculation. If you provide a humidity value
> 0, then humidity effect will be included in the
calculation.
elevation - This should be the geometric altitude of the station
(this is the elevation provided by surveys and normally
used by people when they speak of elevation). Some
algorithms will convert the elevation internally into
a geopotential altitude.
sensorElevation - This should be the geometric altitude of the actual
barometric sensor (which could be different than the
official station elevation).
Notes about Sensor Pressure vs. Station Pressure:
SensorToStationPressure and StationToSensorPressure functions are based
on an ASOS algorithm. It corrects for a difference in elevation between
the official station location and the location of the barometetric sensor.
It turns out that if the elevation difference is under 30 ft, then the
algorithm will give the same result (a 0 to .01 inHg adjustment) regardless
of temperature. In that case, the difference can be covered using a simple
fixed offset. If the difference is 30 ft or greater, there is some effect
from temperature, though it is small. For example, at a 100ft difference,
the adjustment will be .13 inHg at -30F and .10 at 100F. The bottom line
is that while ASOS stations may do this calculation, it is likely unneeded
for home weather stations, and the station pressure and the sensor pressure
can be treated as equivalent."""
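# Illustrative usage (made-up example values, not from this module's doctests;
# all metric units: hPa, metres, Celsius, percent humidity):
#   slp = TWxUtils.StationToSeaLevelPressure(836.2, 1655.4, 15.0, 10.3, 40.5)
#   alt = TWxUtils.StationToAltimeter(836.2, 1655.4, algorithm='aaMADIS')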
from __future__ import absolute_import
from __future__ import print_function
import math
def FToC(value):
return (value - 32.0) * (5.0 / 9.0)
def CToF(value):
return (9.0/5.0)*value + 32.0
def CToK(value):
return value + 273.15
def KToC(value):
return value - 273.15
def FToR(value):
return value + 459.67
def RToF(value):
return value - 459.67
def InToHPa(value):
return value / 0.02953
def HPaToIn(value):
return value * 0.02953
def FtToM(value):
return value * 0.3048
def MToFt(value):
return value / 0.3048
def InToMm(value):
return value * 25.4
def MmToIn(value):
return value / 25.4
def MToKm(value): # NB: This is *miles* to Km.
return value * 1.609344
def KmToM(value): # NB: This is Km to *miles*
return value / 1.609344
def msToKmh(value):
return value * 3.6
def Power10(y):
return pow(10.0, y)
# This maps various Pascal functions to Python functions.
Power = pow
Exp = math.exp
Round = round
class TWxUtils(object):
gravity = 9.80665 # g at sea level at lat 45.5 degrees in m/sec^2
uGC = 8.31432 # universal gas constant in J/mole-K
moleAir = 0.0289644 # mean molecular mass of air in kg/mole
moleWater = 0.01801528 # molecular weight of water in kg/mole
gasConstantAir = uGC/moleAir # (287.053) gas constant for air in J/kgK
standardSLP = 1013.25 # standard sea level pressure in hPa
standardSlpInHg = 29.921 # standard sea level pressure in inHg
standardTempK = 288.15 # standard sea level temperature in Kelvin
earthRadius45 = 6356.766 # radius of the earth at lat 45.5 degrees in km
# standard lapse rate (6.5C/1000m i.e. 6.5K/1000m)
standardLapseRate = 0.0065
# (0.0019812) standard lapse rate per foot (1.98C/1000ft)
standardLapseRateFt = standardLapseRate * 0.3048
vpLapseRateUS = 0.00275 # lapse rate used by VantagePro (2.75F/1000ft)
manBarLapseRate = 0.0117 # lapse rate from Manual of Barometry (11.7F/1000m, which = 6.5C/1000m)
@staticmethod
def StationToSensorPressure(pressureHPa, sensorElevationM, stationElevationM, currentTempC):
# from ASOS formula specified in US units
Result = InToHPa(HPaToIn(pressureHPa) / Power10(0.00813 * MToFt(sensorElevationM - stationElevationM) / FToR(CToF(currentTempC))))
return Result
@staticmethod
def StationToAltimeter(pressureHPa, elevationM, algorithm='aaMADIS'):
if algorithm == 'aaASOS':
# see ASOS training at http://www.nwstc.noaa.gov
# see also http://wahiduddin.net/calc/density_altitude.htm
Result = InToHPa(Power(Power(HPaToIn(pressureHPa), 0.1903) + (1.313E-5 * MToFt(elevationM)), 5.255))
elif algorithm == 'aaASOS2':
geopEl = TWxUtils.GeopotentialAltitude(elevationM)
k1 = TWxUtils.standardLapseRate * TWxUtils.gasConstantAir / TWxUtils.gravity # approx. 0.190263
k2 = 8.41728638E-5 # (stdLapseRate / stdTempK) * (Power(stdSLP, k1)
Result = Power(Power(pressureHPa, k1) + (k2 * geopEl), 1/k1)
elif algorithm == 'aaMADIS':
# from MADIS API by NOAA Forecast Systems Lab
# http://madis.noaa.gov/madis_api.html
            k1 = 0.190284  # discrepancy with calculated k1 probably
# because Smithsonian used less precise gas
# constant and gravity values
k2 = 8.4184960528E-5 # (stdLapseRate / stdTempK) * (Power(stdSLP, k1)
Result = Power(Power(pressureHPa - 0.3, k1) + (k2 * elevationM), 1/k1)
elif algorithm == 'aaNOAA':
# http://www.srh.noaa.gov/elp/wxclc/formulas/altimeterSetting.html
            k1 = 0.190284  # discrepancy with k1 probably because
# Smithsonian used less precise gas constant
# and gravity values
k2 = 8.42288069E-5 # (stdLapseRate / 288) * (Power(stdSLP, k1SMT)
Result = (pressureHPa - 0.3) * Power(1 + (k2 * (elevationM / Power(pressureHPa - 0.3, k1))), 1/k1)
elif algorithm == 'aaWOB':
# see http://www.wxqa.com/archive/obsman.pdf
k1 = TWxUtils.standardLapseRate * TWxUtils.gasConstantAir / TWxUtils.gravity # approx. 0.190263
k2 = 1.312603E-5 # (stdLapseRateFt / stdTempK) * Power(stdSlpInHg, k1)
Result = InToHPa(Power(Power(HPaToIn(pressureHPa), k1) + (k2 * MToFt(elevationM)), 1/k1))
elif algorithm == 'aaSMT':
# WMO Instruments and Observing Methods Report No.19
# http://www.wmo.int/pages/prog/www/IMOP/publications/IOM-19-Synoptic-AWS.pdf
            k1 = 0.190284  # discrepancy with calculated value probably
# because Smithsonian used less precise gas
# constant and gravity values
k2 = 4.30899E-5 # (stdLapseRate / 288) * (Power(stdSlpInHg, k1SMT))
geopEl = TWxUtils.GeopotentialAltitude(elevationM)
Result = InToHPa((HPaToIn(pressureHPa) - 0.01) * Power(1 + (k2 * (geopEl / Power(HPaToIn(pressureHPa) - 0.01, k1))), 1/k1))
else:
raise ValueError("Unknown StationToAltimeter algorithm '%s'" %
algorithm)
return Result
@staticmethod
def StationToSeaLevelPressure(pressureHPa, elevationM,
currentTempC, meanTempC, humidity,
algorithm = 'paManBar'):
Result = pressureHPa * TWxUtils.PressureReductionRatio(pressureHPa,
elevationM,
currentTempC,
meanTempC,
humidity,
algorithm)
return Result
@staticmethod
def SensorToStationPressure(pressureHPa, sensorElevationM,
stationElevationM, currentTempC):
# see ASOS training at http://www.nwstc.noaa.gov
# from US units ASOS formula
Result = InToHPa(HPaToIn(pressureHPa) * Power10(0.00813 * MToFt(sensorElevationM - stationElevationM) / FToR(CToF(currentTempC))))
return Result
# FIXME: still to do
#class function TWxUtils.AltimeterToStationPressure(pressureHPa: TWxReal;
# elevationM: TWxReal;
# algorithm: TAltimeterAlgorithm = DefaultAltimeterAlgorithm): TWxReal;
#begin
#end;
#}
@staticmethod
def SeaLevelToStationPressure(pressureHPa, elevationM,
currentTempC, meanTempC, humidity,
algorithm = 'paManBar'):
Result = pressureHPa / TWxUtils.PressureReductionRatio(pressureHPa,
elevationM,
currentTempC,
meanTempC,
humidity,
algorithm)
return Result
@staticmethod
def PressureReductionRatio(pressureHPa, elevationM,
currentTempC, meanTempC, humidity,
algorithm = 'paManBar'):
if algorithm == 'paUnivie':
# http://www.univie.ac.at/IMG-Wien/daquamap/Parametergencom.html
geopElevationM = TWxUtils.GeopotentialAltitude(elevationM)
Result = Exp(((TWxUtils.gravity/TWxUtils.gasConstantAir) * geopElevationM) / (TWxUtils.VirtualTempK(pressureHPa, meanTempC, humidity) + (geopElevationM * TWxUtils.standardLapseRate/2)))
elif algorithm == 'paDavisVp':
# http://www.exploratorium.edu/weather/barometer.html
if (humidity > 0):
hCorr = (9.0/5.0) * TWxUtils.HumidityCorrection(currentTempC, elevationM, humidity, 'vaDavisVp')
else:
hCorr = 0
# In the case of DavisVp, take the constant values literally.
Result = Power(10, (MToFt(elevationM) / (122.8943111 * (CToF(meanTempC) + 460 + (MToFt(elevationM) * TWxUtils.vpLapseRateUS/2) + hCorr))))
elif algorithm == 'paManBar':
# see WMO Instruments and Observing Methods Report No.19
# http://www.wmo.int/pages/prog/www/IMOP/publications/IOM-19-Synoptic-AWS.pdf
# http://www.wmo.ch/web/www/IMOP/publications/IOM-19-Synoptic-AWS.pdf
if (humidity > 0):
hCorr = (9.0/5.0) * TWxUtils.HumidityCorrection(currentTempC, elevationM, humidity, 'vaBuck')
else:
hCorr = 0
geopElevationM = TWxUtils.GeopotentialAltitude(elevationM)
Result = Exp(geopElevationM * 6.1454E-2 / (CToF(meanTempC) + 459.7 + (geopElevationM * TWxUtils.manBarLapseRate / 2) + hCorr))
else:
raise ValueError("Unknown PressureReductionRatio algorithm '%s'" %
algorithm)
return Result
@staticmethod
def ActualVaporPressure(tempC, humidity, algorithm='vaBolton'):
result = (humidity * TWxUtils.SaturationVaporPressure(tempC, algorithm)) / 100.0
return result
@staticmethod
def SaturationVaporPressure(tempC, algorithm='vaBolton'):
# comparison of vapor pressure algorithms
# http://cires.colorado.edu/~voemel/vp.html
# (for DavisVP) http://www.exploratorium.edu/weather/dewpoint.html
if algorithm == 'vaDavisVp':
# Davis Calculations Doc
Result = 6.112 * Exp((17.62 * tempC)/(243.12 + tempC))
elif algorithm == 'vaBuck':
# Buck(1996)
Result = 6.1121 * Exp((18.678 - (tempC/234.5)) * tempC / (257.14 + tempC))
elif algorithm == 'vaBuck81':
# Buck(1981)
Result = 6.1121 * Exp((17.502 * tempC)/(240.97 + tempC))
elif algorithm == 'vaBolton':
# Bolton(1980)
Result = 6.112 * Exp(17.67 * tempC / (tempC + 243.5))
elif algorithm == 'vaTetenNWS':
# Magnus Teten
# www.srh.weather.gov/elp/wxcalc/formulas/vaporPressure.html
Result = 6.112 * Power(10,(7.5 * tempC / (tempC + 237.7)))
elif algorithm == 'vaTetenMurray':
# Magnus Teten (Murray 1967)
Result = Power(10, (7.5 * tempC / (237.5 + tempC)) + 0.7858)
elif algorithm == 'vaTeten':
# Magnus Teten
# www.vivoscuola.it/US/RSIGPP3202/umidita/attivita/relhumONA.htm
Result = 6.1078 * Power(10, (7.5 * tempC / (tempC + 237.3)))
else:
raise ValueError("Unknown SaturationVaporPressure algorithm '%s'" %
algorithm)
return Result
@staticmethod
def MixingRatio(pressureHPa, tempC, humidity):
k1 = TWxUtils.moleWater / TWxUtils.moleAir # 0.62198
# http://www.wxqa.com/archive/obsman.pdf
# http://www.vivoscuola.it/US/RSIGPP3202/umidita/attiviat/relhumONA.htm
vapPres = TWxUtils.ActualVaporPressure(tempC, humidity, 'vaBuck')
Result = 1000 * ((k1 * vapPres) / (pressureHPa - vapPres))
return Result
@staticmethod
def VirtualTempK(pressureHPa, tempC, humidity):
epsilon = 1 - (TWxUtils.moleWater / TWxUtils.moleAir) # 0.37802
# http://www.univie.ac.at/IMG-Wien/daquamap/Parametergencom.html
# http://www.vivoscuola.it/US/RSIGPP3202/umidita/attiviat/relhumONA.htm
# http://wahiduddin.net/calc/density_altitude.htm
vapPres = TWxUtils.ActualVaporPressure(tempC, humidity, 'vaBuck')
Result = (CToK(tempC)) / (1-(epsilon * (vapPres/pressureHPa)))
return Result
@staticmethod
def HumidityCorrection(tempC, elevationM, humidity, algorithm='vaBolton'):
vapPress = TWxUtils.ActualVaporPressure(tempC, humidity, algorithm)
Result = (vapPress * ((2.8322E-9 * (elevationM**2)) + (2.225E-5 * elevationM) + 0.10743))
return Result
@staticmethod
def GeopotentialAltitude(geometricAltitudeM):
Result = (TWxUtils.earthRadius45 * 1000 * geometricAltitudeM) / ((TWxUtils.earthRadius45 * 1000) + geometricAltitudeM)
return Result
#==============================================================================
# class TWxUtilsUS
#==============================================================================
class TWxUtilsUS(object):
"""This class provides US unit versions of the functions in uWxUtils.
    Refer to uWxUtils for documentation. All input and output parameters are
in the following US units:
pressure in inches of mercury
temperature in Fahrenheit
wind in MPH
elevation in feet"""
@staticmethod
def StationToSensorPressure(pressureIn, sensorElevationFt,
stationElevationFt, currentTempF):
Result = pressureIn / Power10(0.00813 * (sensorElevationFt - stationElevationFt) / FToR(currentTempF))
return Result
@staticmethod
def StationToAltimeter(pressureIn, elevationFt,
algorithm='aaMADIS'):
"""Example:
>>> p = TWxUtilsUS.StationToAltimeter(24.692, 5431, 'aaASOS')
>>> print("Station pressure to altimeter = %.3f" % p)
Station pressure to altimeter = 30.153
"""
Result = HPaToIn(TWxUtils.StationToAltimeter(InToHPa(pressureIn),
FtToM(elevationFt),
algorithm))
return Result
@staticmethod
def StationToSeaLevelPressure(pressureIn, elevationFt,
currentTempF, meanTempF, humidity,
algorithm='paManBar'):
"""Example:
>>> p = TWxUtilsUS.StationToSeaLevelPressure(24.692, 5431, 59.0, 50.5, 40.5)
>>> print("Station to SLP = %.3f" % p)
Station to SLP = 30.006
"""
Result = pressureIn * TWxUtilsUS.PressureReductionRatio(pressureIn,
elevationFt,
currentTempF,
meanTempF,
humidity,
algorithm)
return Result
@staticmethod
def SensorToStationPressure(pressureIn,
sensorElevationFt, stationElevationFt,
currentTempF):
Result = pressureIn * Power10(0.00813 * (sensorElevationFt - stationElevationFt) / FToR(currentTempF))
return Result
@staticmethod
def AltimeterToStationPressure(pressureIn, elevationFt,
algorithm='aaMADIS'):
        # NOTE: TWxUtils.AltimeterToStationPressure is still a FIXME stub above,
        # so this call fails with AttributeError until it is implemented.
        Result = TWxUtils.AltimeterToStationPressure(InToHPa(pressureIn),
FtToM(elevationFt),
algorithm)
return Result
@staticmethod
def SeaLevelToStationPressure(pressureIn, elevationFt,
currentTempF, meanTempF, humidity,
algorithm='paManBar'):
"""Example:
>>> p = TWxUtilsUS.SeaLevelToStationPressure(30.153, 5431, 59.0, 50.5, 40.5)
>>> print("Station to SLP = %.3f" % p)
Station to SLP = 24.813
"""
Result = pressureIn / TWxUtilsUS.PressureReductionRatio(pressureIn,
elevationFt,
currentTempF,
meanTempF,
humidity,
algorithm)
return Result
@staticmethod
def PressureReductionRatio(pressureIn, elevationFt,
currentTempF, meanTempF, humidity,
algorithm='paManBar'):
Result = TWxUtils.PressureReductionRatio(InToHPa(pressureIn),
FtToM(elevationFt),
FToC(currentTempF),
FToC(meanTempF),
humidity, algorithm)
return Result
@staticmethod
def ActualVaporPressure(tempF, humidity, algorithm='vaBolton'):
Result = (humidity * TWxUtilsUS.SaturationVaporPressure(tempF, algorithm)) / 100
return Result
@staticmethod
def SaturationVaporPressure(tempF, algorithm='vaBolton'):
Result = HPaToIn(TWxUtils.SaturationVaporPressure(FToC(tempF),
algorithm))
return Result
@staticmethod
def MixingRatio(pressureIn, tempF, humidity):
Result = HPaToIn(TWxUtils.MixingRatio(InToHPa(pressureIn),
FToC(tempF), humidity))
return Result
@staticmethod
def HumidityCorrection(tempF, elevationFt, humidity, algorithm='vaBolton'):
Result = TWxUtils.HumidityCorrection(FToC(tempF),
FtToM(elevationFt),
humidity,
algorithm)
return Result
@staticmethod
def GeopotentialAltitude(geometricAltitudeFt):
Result = MToFt(TWxUtils.GeopotentialAltitude(FtToM(geometricAltitudeFt)))
return Result
#==============================================================================
# class TWxUtilsVP
#==============================================================================
class uWxUtilsVP(object):
""" This class contains functions for calculating the raw sensor pressure
of a Vantage Pro weather station from the sea level reduced pressure it
provides.
The sensor pressure can then be used to calcuate altimeter setting using
other functions in the uWxUtils and uWxUtilsUS units.
notes about input parameters:
currentTemp - current instantaneous station temperature
temp12HrsAgoF - temperature from 12 hours ago. If the 12 hour temp is
not known, simply pass the same value as currentTemp
for the 12 hour temp. For the vantage pro sea level
to sensor pressure conversion, the 12 hour temp
should be the hourly temp that is 11 hours to 11:59
in the past. For example, if the current time is
3:59pm, use the 4:00am temp, and if it is currently
4:00pm, use the 5:00am temp. Also, the vantage pro
seems to use only whole degree temp values in the sea
level calculation, so the function performs rounding
on the temperature.
meanTemp - average of current temp and the temperature 12 hours in
the past. If the 12 hour temp is not known, simply pass
the same value as currentTemp for the mean temp. For the
Vantage Pro, the mean temperature should come from the
BARDATA.VirtualTemp. The value in BARDATA is an integer
(whole degrees). The vantage pro calculates the mean by
Round(((Round(currentTempF - 0.01) +
Round(temp12HrsAgoF - 0.01)) / 2) - 0.01);
humidity - Value should be 0 to 100. For the pressure conversion
functions, pass a value of zero if you do not want to
the algorithm to include the humidity correction factor
in the calculation. If you provide a humidity value
> 0, then humidity effect will be included in the
calculation.
elevation - This should be the geometric altitude of the station
(this is the elevation provided by surveys and normally
used by people when they speak of elevation). Some
algorithms will convert the elevation internally into
a geopotential altitude."""
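    # Illustrative example (made-up values): recovering the sensor pressure
    # from a Vantage Pro sea-level reading of 30.01 inHg at 1200 ft, with a
    # current temp of 71F, a 12-hours-ago temp of 66F and 45% humidity:
    #   uWxUtilsVP.SeaLevelToSensorPressure_12(30.01, 1200, 71, 66, 45)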
# this function is used if you have access to BARDATA (Davis Serial docs)
# meanTempF is from BARDATA.VirtualTemp
# humidityCorr is from BARDATA.C (remember to first divide C by 10)
@staticmethod
def SeaLevelToSensorPressure_meanT(pressureIn, elevationFt, meanTempF,
humidityCorr):
Result = TWxUtilsUS.SeaLevelToStationPressure(
pressureIn, elevationFt, meanTempF,
meanTempF + humidityCorr, 0, 'paDavisVp')
return Result
# this function is used if you do not have access to BARDATA. The function
# will internally calculate the mean temp and the humidity correction
# the would normally come from the BARDATA.
# currentTempF is the value of the current sensor temp
# temp12HrsAgoF is the temperature from 12 hours ago (see comments on
# temp12Hr from earlier in this document for more on this).
@staticmethod
def SeaLevelToSensorPressure_12(pressureIn, elevationFt, currentTempF,
temp12HrsAgoF, humidity):
Result = TWxUtilsUS.SeaLevelToStationPressure(
pressureIn, elevationFt, currentTempF,
Round(((Round(currentTempF - 0.01) + Round(temp12HrsAgoF - 0.01)) / 2) - 0.01),
humidity, 'paDavisVp')
return Result
if __name__ == "__main__":
import doctest
if not doctest.testmod().failed:
print("PASSED")
| [
"[email protected]"
] | |
5eb5f1b1e17df3c5a4a386f4ebd33e63991af2cd | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/1324501/snippet.py | 1c0cb3d7b8a1a399f9b0fbe7ad92bea55209c524 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 5,926 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import os, sys
import struct as st
import time as tm
uint32 = '>I'
int32 = '>i'
uint16 = '>H'
int16 = '>h'
uint8 = 'B'
int8 = 'b'
#file header
file_version_number_fmt = uint32
app_version_number_fmt = uint32
idtag_length_fmt = uint16
length_length_fmt = uint16
#struct record header
tag_id_fmt = uint8 #=1byte file header idtag_length
length_fmt = uint16 #=2bytes file header length_length
#records const
recdomain = 0x01
recpath = 0x02
reccookie = 0x03
#msb flags
msb = 0x80
enddomain = (msb | 0x04)
endpath = (msb | 0x05)
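# On-disk layout (Opera cookies4.dat), as parsed below: a file header of
# uint32 file_version, uint32 app_version, uint16 idtag_length and
# uint16 length_length, followed by a stream of tagged records. Tags with
# the MSB set (enddomain/endpath) are zero-length end markers; every other
# tag carries a uint16 payload length and nested tag/length/value items.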
#indent size
doind = 0
indent = 0
def readfmt(f, fmt):
size = st.calcsize(fmt)
data = f.read(size)
if len(data) < size: raise EOFError('in readfmt, at pos:', f.tell())
return st.unpack(fmt, data)[0]
def read(f, ln):
data = f.read(ln)
if len(data) < ln: raise EOFError('in read, at pos:', f.tell())
return data
def getfhdr(f):
fhdr = {}
fhdr[1] = readfmt(f, file_version_number_fmt)
fhdr[2] = readfmt(f, app_version_number_fmt)
fhdr[3] = readfmt(f, idtag_length_fmt)
fhdr[4] = readfmt(f, length_length_fmt)
return fhdr
def getrecid(f):
return readfmt(f, tag_id_fmt)
def getrechdr(f, hdr):
global doind
if (msb & hdr):
if enddomain == hdr:
doind = -1
return True, 'end of domain record', 0
elif endpath == hdr:
return True, 'end of path record', 0
else:
return True, 'end of unknown record', 0,
else:
datalen = readfmt(f, length_fmt)
if recdomain == hdr:
doind = 1
return False, 'domain record', datalen
elif recpath == hdr:
return False, 'path record', datalen
elif reccookie == hdr:
return False, 'cookie record', datalen
else:
return False, 'unknown record', datalen
def getddidname(did):
flag = True if msb & did else False
if 0x1e == did: name = 'name of the domain part'
elif 0x1f == did: name = 'how cookies are filtered for this domain.'
elif 0x21 == did: name = 'handling of cookies that have explicit paths'
elif 0x25 == did: name = 'filter third party cookies mode'
else: name = 'unknown domain data id'
return flag, name
def getpdidname(did):
flag = True if msb & did else False
if 0x1d == did: name = 'the name of the path part'
else: name = 'unknown path data id'
return flag, name
def getcdidname(did):
flag = True if msb & did else False
if 0x10 == did: name = 'name of the cookie'
elif 0x11 == did: name = 'value of the cookie'
elif 0x12 == did: name = 'expiry'
elif 0x13 == did: name = 'last used'
elif 0x14 == did: name = 'comment/description of use'
elif 0x15 == did: name = 'URL for comment/description of use'
elif 0x16 == did: name = 'domain received with version=1 cookies'
elif 0x17 == did: name = 'path received with version=1 cookies'
elif 0x18 == did: name = 'port limitations received with version=1 cookies'
elif 0x1A == did: name = 'version number of cookie'
elif (0x19 | msb) == did: name = 'will only be sent to https servers'
elif (0x1b | msb) == did: name = 'will only be sent to the server that sent it'
elif (0x1c | msb) == did: name = 'reserved for delete protection: not yet implemented'
elif (0x20 | msb) == did: name = 'will not be sent if the path is a prefix of the url'
elif (0x22 | msb) == did: name = 'was set as the result of a password login form'
elif (0x23 | msb) == did: name = 'was set as the result of a http authentication login'
elif (0x24 | msb) == did: name = 'was set by a third party server'
else: name = 'unknown cookie data id'
return flag, name
def printt(*args):
global indent
if 0 > indent:
print('indent:', indent)
print('\t'*indent, *args, sep='')
def prsdomaindata(f, d):
end = f.tell() + d[2]
data = []
while f.tell() < end:
did = readfmt(f, tag_id_fmt)
flag, dnam = getddidname(did)
if not flag:
dlen = readfmt(f, length_fmt)
draw = read(f, dlen)
else:
dlen = 0
draw = ''
data.append((hex(did), dnam, dlen, draw))
printt(data)
return data
def prspathdata(f, d):
end = f.tell() + d[2]
data = []
while f.tell() < end:
did = readfmt(f, tag_id_fmt)
flag, dnam = getpdidname(did)
if not flag:
dlen = readfmt(f, length_fmt)
draw = read(f, dlen)
else:
dlen = 0
draw = ''
data.append((hex(did), dnam, dlen, draw))
printt(data)
return data
def frmtm(t):
unp = st.unpack('>Q', t)[0]
return tm.strftime('%Y-%m-%d %H:%M:%S', tm.localtime(unp))
def prscookiedata(f, d):
end = f.tell() + d[2]
data = []
while f.tell() < end:
did = readfmt(f, tag_id_fmt)
flag, dnam = getcdidname(did)
if not flag:
dlen = readfmt(f, length_fmt)
draw = read(f, dlen)
if 0x12==did or 0x13==did:
draw = frmtm(draw)
else:
dlen = 0
draw = ''
data.append((hex(did), dnam, dlen, draw))
printt(data)
return data
def process(f):
global indent
global doind
f.seek(0, os.SEEK_END)
fend = f.tell()
f.seek(0, os.SEEK_SET)
fhdr = getfhdr(f)
print('file_version_number', fhdr[1])
print('app_version_number', fhdr[2])
print('idtag_length', fhdr[3])
print('length_length', fhdr[4])
if (fhdr[3] != 1) and (fhdr[4] != 2):
printt('Not compatible')
return
while(f.tell() < fend):
try:
rhdr = getrecid(f)
rdat = getrechdr(f, rhdr)
if True == rdat[0]:
indent += doind
doind = 0
printt(rdat[1])
elif False == rdat[0]:
printt(rdat[1])
indent += doind
doind = 0
if rhdr == recdomain:
prsdomaindata(f, rdat)
elif rhdr == recpath:
prspathdata(f, rdat)
elif rhdr == reccookie:
prscookiedata(f, rdat)
else:
printt('parse unknown record')
except EOFError as e:
printt('EOFError', e)
input()
break
def main():
if len(sys.argv) == 2 and os.path.exists(sys.argv[1]):
file = open(sys.argv[1], 'rb')
process(file)
file.close()
if __name__ == '__main__':
try:
main()
except:
import traceback
print('Unhandled exception:\n')
traceback.print_exc()
input('\nPress any key...')
| [
"[email protected]"
] | |
6e87b1af4210c206f1bb715e1b4a99fe7d50ad0a | 093b9569be9d1c4e5daf92efbebc38f680917b2d | /.history/base/models_20210828170925.py | 1ae5275cea498d2f694b609a8d77a0d76c347858 | [] | no_license | Justin-Panagos/todoList | 95b1e97ff71af1b0be58e7f8937d726a687cea4d | 10539219b59fcea00f8b19a406db3d4c3f4d289e | refs/heads/master | 2023-08-04T13:27:13.309769 | 2021-08-29T14:06:43 | 2021-08-29T14:06:43 | 400,827,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Task(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Field types below are assumed -- typical choices for a simple to-do model:
    title = models.CharField(max_length=200)
    description = models.TextField(null=True, blank=True)
    complete = models.BooleanField(default=False)
    create = models.DateTimeField(auto_now_add=True) | [
"[email protected]"
] | |
1cfdc64b037a1a5b23091e1f5821332e1768d22a | 32712c478ff9dff44de085cb50a1302bfc2eba67 | /users/migrations/0003_auto_20200409_0846.py | 6d5dd5bce835963096d9c8f9075a6418cf788858 | [
"MIT"
] | permissive | vas3k/vas3k.club | 158af17c329fe693178ca1bce36466922604df3b | b3ff2fd95ef1d6c593c57d3bcd501240f2705fbb | refs/heads/master | 2023-09-03T07:10:10.859004 | 2023-09-01T09:08:32 | 2023-09-01T09:08:32 | 254,190,180 | 697 | 326 | MIT | 2023-09-04T09:02:12 | 2020-04-08T20:11:44 | Python | UTF-8 | Python | false | false | 766 | py | # Generated by Django 3.0.4 on 2020-04-09 08:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20200408_1516'),
]
operations = [
migrations.AlterModelOptions(
name='tag',
options={'ordering': ['group', '?']},
),
migrations.AddField(
model_name='tag',
name='index',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='tag',
name='group',
field=models.CharField(choices=[('hobbies', 'Хобби'), ('personal', 'Личное'), ('other', 'Остальное')], default='other', max_length=32),
),
]
| [
"[email protected]"
] | |
1ef9484581829929eb621c45569e4ed19c1bce55 | 6909de83dd90ee1169d6c453c327ab2ce2687485 | /scheme/abstract_turtle/turtle_class.py | cfdc404d2d77783de78399d470c4e8d41ec12522 | [] | no_license | dantefung/cs61a-2021-summer | 730cb0b9ab7327c32c619779d71882531bf328dd | 4f22f20fcfddfb5bf121081919310413209da1b2 | refs/heads/master | 2023-08-19T14:51:27.380738 | 2021-11-01T06:54:33 | 2021-11-01T06:54:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,497 | py | from functools import wraps
from math import pi, sin, cos, copysign
from .model import Color, Position, DrawnTurtle, Mode, LineTo, Arc
from .canvas import Canvas
def turtle_method(func):
"""
Marks the given method as one that needs to be placed in global.
"""
func.is_turtle_method = True
return func
def make_formode():
handlers = {}
def formode(mode):
def decorator(func):
@wraps(func)
def error(self, *args, **kwargs):
raise RuntimeError(
"Handler not available for mode: {}".format(self._BaseTurtle__mode)
)
prev = handlers.get(func.__name__, error)
@wraps(func)
def handler(self, *args, **kwargs):
if self._BaseTurtle__mode == mode:
return func(self, *args, **kwargs)
else:
return prev(self, *args, **kwargs)
handlers[func.__name__] = handler
return handler
return decorator
return formode
formode = make_formode()
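# `formode` lets a class define one method several times, once per Mode: each
# decorated definition chains onto the previous handler of the same name, and
# a call dispatches on the instance's current mode (see the paired
# __to_real_angle definitions below). Illustrative sketch, not part of this
# module:
#
#   @formode(Mode.STANDARD)
#   def zero_heading(self): return "east"
#
#   @formode(Mode.LOGO)
#   def zero_heading(self): return "north"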
class BaseTurtle:
"""
Manages all the basic turtle functionality. The other turtle methods can be expressed in terms of these.
"""
def __init__(self, canvas):
if not isinstance(canvas, Canvas):
raise RuntimeError(
"Expected the argument to Turtle to be of type {} but was {} of type {}".format(
Canvas.__name__, canvas, type(canvas).__name__
)
)
self.__canvas = canvas
self.__x = 0
self.__y = 0
self.__line_width = 1
self.__theta = 0
self.__pen_color = Color(0, 0, 0)
self.__fill_color = Color(0, 0, 0)
self.__pen_down = True
self.__degrees = 360
self.__path = None
self.__turtle_is_shown = True
self.__turtle_stretch_wid = 1
self.__turtle_stretch_len = 1
self.__pixel_size = 1
self.__mode = Mode.STANDARD
self.__speed = 3 # default from the normal turtle module
self.__update_turtle()
@turtle_method
def goto(self, x, y):
"""
Go to the given position (X, Y).
"""
if self.__pen_down:
self.__canvas.draw_line(
self.__current_pos,
Position(x, y),
self.__pen_color,
self.__line_width,
)
self.__x = x
self.__y = y
if self.filling():
self.__path.append(LineTo(self.__current_pos))
self.__update_turtle()
setpos = setposition = goto
@turtle_method
def forward(self, amount):
"""
Move forward the given amount.
"""
self.goto(
self.xcor() + amount * cos(self.__theta),
self.ycor() + amount * sin(self.__theta),
)
fd = forward
@turtle_method
def setheading(self, heading):
"""
Set the heading to the given value in degrees
"""
self.__theta = self.__to_real_angle(heading)
self.__update_turtle()
seth = setheading
@turtle_method
def circle(self, radius, extent=None):
"""
Draw a circle starting at the given point with the given RADIUS and EXTENT. If EXTENT exists, draw only the
first EXTENT degrees of the circle. If RADIUS is positive, draw in the counterclockwise direction.
Otherwise, draw in the clockwise direction.
"""
if extent is None:
extent = self.__degrees
extent = extent / self.__degrees * (2 * pi)
center = Position(
self.__current_pos.x - radius * sin(self.__theta),
self.__current_pos.y + radius * cos(self.__theta),
)
angle_change = copysign(1, radius) * extent
start_angle = self.__theta - pi / 2 * copysign(1, radius)
end_angle = start_angle + angle_change
if self.filling():
self.__path.append(Arc(center, abs(radius), start_angle, end_angle))
if self.__pen_down:
if radius * extent < 0:
start_angle, end_angle = end_angle, start_angle
self.__canvas.draw_circle(
center,
abs(radius),
self.__pen_color,
self.__line_width,
False,
start_angle,
end_angle,
)
final_pos = Position(
center.x + radius * sin(self.__theta + angle_change),
center.y - radius * cos(self.__theta + angle_change),
)
self.__theta += angle_change
self.__x, self.__y = final_pos.x, final_pos.y
self.__update_turtle()
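    # Illustrative examples: circle(50, 90) sweeps a quarter circle counter-
    # clockwise from the current heading; circle(-50, 90) sweeps clockwise.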
@turtle_method
def dot(self, size=None):
"""
Draw a dot at the current location. If size is not specified, set it to
(pensize + 4, pensize * 2)
"""
if size is None:
size = max(self.__line_width + 4, self.__line_width * 2)
if self.__pen_down:
self.__canvas.draw_circle(
self.__current_pos,
size,
self.__pen_color,
self.__line_width,
True,
0,
2 * pi,
)
@turtle_method
def pixel(self, x, y, *color):
"""
Fill in a square of size pixel_size at (x * pixel_size, y * pixel_size) with the given color.
"""
d = self.__pixel_size
self.__canvas.axis_aligned_rectangle(
Position(x * d, y * d), d, d, self.__convert_color(*color)
)
@turtle_method
def pixel_size(self, pixel_size):
if not isinstance(pixel_size, int) or pixel_size <= 0:
raise ValueError(
"Expected a positive integer for pixel_size but got {}".format(
pixel_size
)
)
self.__pixel_size = pixel_size
@turtle_method
def canvas_width(self):
"""
Return the current screen size in pixel units
"""
return self.__canvas.width // self.__pixel_size
@turtle_method
def canvas_height(self):
"""
Return the current screen size in pixel units
"""
return self.__canvas.height // self.__pixel_size
@turtle_method
def xcor(self):
"""
Get the current x coordinate
"""
return self.__x
@turtle_method
def ycor(self):
"""
Get the current y coordinate
"""
return self.__y
@turtle_method
def heading(self):
"""
Get the current heading
"""
return self.__from_real_angle(self.__theta)
@turtle_method
def degrees(self, amount=360):
"""
Set the number of degrees in a circle
"""
self.__degrees = amount
@turtle_method
def pendown(self):
"""
Do draw when moving
"""
self.__pen_down = True
pd = down = pendown
@turtle_method
def penup(self):
"""
Do not draw when moving
"""
self.__pen_down = False
pu = up = penup
@turtle_method
def pensize(self, width=None):
"""
Set or get the pen size. If WIDTH is None, get it, otherwise set it.
"""
if width is None:
return self.__line_width
self.__line_width = width
width = pensize
@turtle_method
def isdown(self):
"""
Return if the pen is down or not
"""
return self.__pen_down
@turtle_method
def pencolor(self, *color):
"""
Set the pen color as COLOR
"""
self.__pen_color = self.__convert_color(*color)
@turtle_method
def fillcolor(self, *color):
"""
Set the fill color as COLOR
"""
self.__fill_color = self.__convert_color(*color)
@turtle_method
def filling(self):
"""
Return whether the canvas is filling.
"""
return self.__path is not None
@turtle_method
def begin_fill(self):
"""
Begin setting the polygon to fill
"""
self.__path = [LineTo(self.__current_pos)]
@turtle_method
def end_fill(self):
"""
End setting the polygon to fill, and fill it in.
"""
if self.__path is None:
return
self.__canvas.fill_path(self.__path, self.__fill_color)
self.__path = None
@turtle_method
def clear(self):
"""
Clear the canvas, but do not move the turtle.
"""
self.__canvas.clear()
@turtle_method
def bgcolor(self, *color):
self.__canvas.set_bgcolor(self.__convert_color(*color))
def __update_turtle(self):
if self.__turtle_is_shown:
self.__canvas.turtle = DrawnTurtle(
self.__current_pos,
self.__theta % (2 * pi),
self.__turtle_stretch_wid,
self.__turtle_stretch_len,
)
else:
self.__canvas.turtle = None
@turtle_method
def hideturtle(self):
"""
Hide the turtle from the canvas.
"""
self.__turtle_is_shown = False
self.__update_turtle()
ht = hideturtle
@turtle_method
def showturtle(self):
"""
Show the turtle on the canvas
"""
self.__turtle_is_shown = True
self.__update_turtle()
st = showturtle
@turtle_method
def isvisible(self):
"""
Return whether the turtle is visible
"""
return self.__turtle_is_shown
@turtle_method
def shapesize(self, stretch_wid=None, stretch_len=None):
        # Only overwrite a stretch factor when a value is actually supplied.
        if stretch_wid is not None:
            self.__turtle_stretch_wid = stretch_wid
        if stretch_len is not None:
            self.__turtle_stretch_len = stretch_len
self.__update_turtle()
turtlesize = shapesize
@turtle_method
def mode(self, mode=None):
if mode is None:
return self.__mode.value
elif mode == "standard":
self.__mode = Mode.STANDARD
elif mode == "logo":
self.__mode = Mode.LOGO
elif mode == "world":
raise RuntimeError("Custom world coordinates not supported.")
else:
raise RuntimeError("Unknown mode: {}".format(mode))
self.goto(0, 0)
self.setheading(0)
self.clear()
@turtle_method
def speed(self, speed=None):
if speed is None:
return self.__speed
self.__speed = speed
self.__canvas.set_speed(speed)
@turtle_method
def exitonclick(self):
return self.__canvas.exit_on_click()
@property
def __current_pos(self):
return Position(self.__x, self.__y)
@formode(Mode.STANDARD)
def __to_real_angle(self, amount):
return (amount / self.__degrees) * (2 * pi)
@formode(Mode.STANDARD)
def __from_real_angle(self, angle):
return (angle / (2 * pi)) * self.__degrees % self.__degrees
@formode(Mode.LOGO)
def __to_real_angle(self, amount):
return (1 / 4 - amount / self.__degrees) * (2 * pi)
@formode(Mode.LOGO)
def __from_real_angle(self, angle):
return (1 / 4 - angle / (2 * pi)) * self.__degrees % self.__degrees
@staticmethod
def __convert_color(*color):
return Color.of(*color)
class Turtle(BaseTurtle):
"""
This entire class should only use public methods of the BaseTurtle class.
"""
@turtle_method
def backward(self, amount):
"""
Move backward the given amount.
"""
self.forward(-amount)
bk = back = backward
@formode(Mode.STANDARD)
def right(self, amount):
self.setheading(self.heading() - amount)
@turtle_method
@formode(Mode.LOGO)
def right(self, amount):
"""
Rotate right the given amount.
"""
self.setheading(self.heading() + amount)
rt = right
@turtle_method
def left(self, amount):
"""
Rotate left the given amount.
"""
self.right(-amount)
lt = left
@turtle_method
def setx(self, x):
"""
Move so that the x coordinate is X
"""
self.goto(x, self.xcor())
@turtle_method
def sety(self, y):
"""
Move so that the y coordinate is Y
"""
self.goto(self.xcor(), y)
@turtle_method
def home(self):
"""
Set location to (0, 0) and set heading to 0
"""
self.goto(0, 0)
self.setheading(0)
@turtle_method
def position(self):
"""
Get the current position as a tuple
"""
return self.xcor(), self.ycor()
pos = position
@turtle_method
def distance(self, other):
"""
Get the distance between this and the other location/turtle.
"""
if isinstance(other, Turtle):
return self.distance(other.position())
x, y = other
return ((x - self.xcor()) ** 2 + (y - self.ycor()) ** 2) ** 0.5
@turtle_method
def radians(self):
"""
Set angle units to radians
"""
return self.degrees(2 * pi)
@turtle_method
def color(self, *color):
"""
Set both the pen and fill colors
"""
self.pencolor(*color)
self.fillcolor(*color)
@turtle_method
def reset(self):
self.home()
self.clear()
| [
"[email protected]"
] | |
6d77d52b9ef9e62676c4faa6684812efe2718167 | 770accfc4da8988db9b91d435623bc4f0860d6e8 | /src/race/scripts/keyboard.py | 79399f36cd172cbb9d8930627d46e9e51bff53a5 | [
"MIT"
] | permissive | mgoli-cav/Platooning-F1Tenth | 82eb4f79296b4e48b27b077ab68ce5d15a9281a0 | f8b929f0669cd721d861333286995b2feef2641e | refs/heads/master | 2022-11-12T00:21:46.348447 | 2020-06-19T22:26:56 | 2020-06-19T22:26:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | #!/usr/bin/env python
import rospy
from race.msg import drive_param
import sys, select, termios, tty
pub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)
keyBindings = {
'w':(1,0),
'd':(1,-1),
'a':(1,1),
's':(-1,0),
}
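# Each entry maps a key to (throttle multiplier, steering multiplier); the
# values are scaled by `speed` and `turn` below before being published.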
def getKey():
    """Read a single keypress in raw mode, then restore the terminal settings."""
    tty.setraw(sys.stdin.fileno())
select.select([sys.stdin], [], [], 0)
key = sys.stdin.read(1)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
speed = 0.5
turn = 0.25
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('keyboard', anonymous=True)
x = 0
th = 0
status = 0
try:
while(1):
key = getKey()
if key in keyBindings.keys():
x = keyBindings[key][0]
th = keyBindings[key][1]
else:
x = 0
th = 0
if (key == '\x03'):
break
msg = drive_param()
msg.velocity = x*speed
msg.angle = th*turn
pub.publish(msg)
except:
print 'error'
finally:
msg = drive_param()
msg.velocity = 0
msg.angle = 0
pub.publish(msg)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
| [
"[email protected]"
] | |
32944cfd8beb4be9ec3e8b9a0a5c40fbcdbc8659 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2530/60785/314884.py | 6203abf12fc334b23c624843c641e54b233f7d1e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | s=list(input())
t=list(input())
indic=[]
others=[]
res=[]
dic=dict()
for i in range(len(s)):
dic.update({s[i]:i})
for i in range(len(t)):
if t[i] not in s:
others.append(t[i])
else:
indic.append(dic[t[i]])
indic.sort()
#for i in range(len(indic)):
    #res.append()
print(dic.items()) | [
"[email protected]"
] | |
072d0dba6c0ca264afdf8e312adc7a46a009aedc | c83e356d265a1d294733885c373d0a4c258c2d5e | /mayan/apps/mirroring/tests/literals.py | bf6b4866cd5651e325b438c1c41fa096b2780241 | [
"Apache-2.0"
] | permissive | TrellixVulnTeam/fall-2021-hw2-451-unavailable-for-legal-reasons_6YX3 | 4160809d2c96707a196b8c94ea9e4df1a119d96a | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | refs/heads/master | 2023-08-21T23:36:41.230179 | 2021-10-02T03:51:12 | 2021-10-02T03:51:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # -*- coding: utf-8 -*-
TEST_CACHE_KEY_BAD_CHARACTERS = ' \r\n!@#$%^&*()+_{}|:"<>?-=[];\',./'
TEST_DOCUMENT_PK = 99
TEST_KEY_UNICODE = 'áéíóúüäåéë¹²³¤'
TEST_KEY_UNICODE_HASH = 'ba418878794230c3f4308e66c70db31dd83f1def4d9381f379c50f42eb88989c'
TEST_NODE_EXPRESSION = 'level_1'
TEST_NODE_EXPRESSION_INVALID = 'level/1'
TEST_NODE_EXPRESSION_MULTILINE = 'first\r\nsecond\r\nthird'
TEST_NODE_EXPRESSION_MULTILINE_EXPECTED = 'first second third'
TEST_NODE_EXPRESSION_MULTILINE_2 = '\r\n\r\nfirst\r\nsecond\r\nthird\r\n'
TEST_NODE_EXPRESSION_MULTILINE_2_EXPECTED = 'first second third'
TEST_NODE_PK = 88
TEST_PATH = '/test/path'
| [
"[email protected]"
] | |
4bbee182075a9344afaaa9db03b894910f5cdeda | db58ec54f85fd8d4ef6904529c5b17393ee041d8 | /hacker-rank/30-days-of-code/3-intro-to-conditional-statements.py | a18e87e87351ccafc3ade2a2b6eb8288cf2008aa | [
"MIT"
] | permissive | washimimizuku/python-data-structures-and-algorithms | 90ae934fc7d2bac5f50c18e7fbc463ba0c026fa4 | 537f4eabaf31888ae48004d153088fb28bb684ab | refs/heads/main | 2023-08-28T07:45:19.603594 | 2021-11-08T07:53:52 | 2021-11-08T07:53:52 | 334,213,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | #!/bin/python3
'''
https://www.hackerrank.com/challenges/30-conditional-statements
Given an integer, n, perform the following conditional actions:
1. If n is odd, print Weird
2. If n is even and in the inclusive range of 2 to 5, print Not Weird
3. If n is even and in the inclusive range of 6 to 20, print Weird
4. If n is even and greater than 20, print Not Weird
Complete the stub code provided in your editor to print whether or not n is weird.
'''
if __name__ == '__main__':
N = int(input())
if (N % 2):
print('Weird')
    elif (N >= 6 and N <= 20):
        print('Weird')
    else:
        print('Not Weird')
| [
"[email protected]"
] | |
76ab20b11cb7917130d6313dd70fc7c2204486ac | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /HvsBiHLGcsv2ex3gv_17.py | 9eeb06382887e34dbf89828fd97e2c5c4b26d16d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py |
import math
def shortestDistance(txt):
x1, y1, x2, y2 = list(map(float, txt.split(',')))
return round(((x1 - x2)**2 + (y1 - y2)**2)**.5, 2)
| [
"[email protected]"
] | |
8a6bdf502895e1b6a3423a37821464cc67432269 | a1c20ec292350a4c8f2164ba21715414f8a77d19 | /edx/5_Joynernacci.py | 968c322ff5cda6dc679e9a0c92205cd2d9a9b575 | [] | no_license | nowacki69/Python | 2ae621b098241a614836a50cb1102094bf8e689f | e5325562801624e43b3975d9f246af25517be55b | refs/heads/master | 2021-08-22T21:30:39.534186 | 2019-09-03T01:21:11 | 2019-09-03T01:21:11 | 168,528,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | #Remember that Fibonacci's sequence is a sequence of numbers
#where every number is the sum of the previous two numbers.
#
#Joynernacci numbers are similar to Fibonacci numbers, but
#with two differences:
#
# - Fibonacci numbers are famous, Joynernacci numbers are
# not (yet).
# - In Joynernacci numbers, even-indexed numbers are the
# sum of the previous two numbers, while odd-indexed
# numbers are the absolute value of the difference
# between the previous two numbers.
#
#For example: the Joynernacci sequence starts with 1 and 1
#as the numbers at index 1 and 2. 3 is an odd index, so
#the third number would be 0 (1 - 1 = 0). 4 is an even
#index, so the fourth number would be 1 (0 + 1). 5 is an
#odd index, so the fifth number would be 1 (1 - 0). And
#so on.
#
#The first several Joynernacci numbers (and their indices)
#are thus:
#
# 1 1 0 1 1 2 1 3 2 5 3 8 5 13 8 21 13 34 21
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
#
#Write a function called joynernacci that returns the nth
#Joynernacci number. For example:
#
# joynernacci(5) -> 1
# joynernacci(12) -> 8
#
#We recommend implementing joynernacci recursively, but it
#is not required.
#Write your code here!
#Below are some lines of code that will test your function.
#You can change the value of the variable(s) to test your
#function with different inputs.
#
#If your function works correctly, this will originally
#print: 1, then 8
print(joynernacci(5))
print(joynernacci(12))
| [
"[email protected]"
] | |
4b9b8076b1e6d35fef5e2416fbeac00933997a38 | 0ed9a8eef1d12587d596ec53842540063b58a7ec | /cloudrail/knowledge/context/environment_context/environment_context_defaults_merger.py | c0aa6175324c16da21d47dff9af6a602a4d700ef | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | cbc506/cloudrail-knowledge | 8611faa10a3bf195f277b81622e2590dbcc60da4 | 7b5c9030575f512b9c230eed1a93f568d8663708 | refs/heads/main | 2023-08-02T08:36:22.051695 | 2021-09-13T15:23:33 | 2021-09-13T15:24:26 | 390,127,361 | 0 | 0 | MIT | 2021-07-27T21:08:06 | 2021-07-27T21:08:06 | null | UTF-8 | Python | false | false | 308 | py | from abc import abstractmethod
from cloudrail.knowledge.context.base_environment_context import BaseEnvironmentContext
class EnvironmentContextDefaultsMerger:
@staticmethod
@abstractmethod
def merge_defaults(scanner_ctx: BaseEnvironmentContext, iac_ctx: BaseEnvironmentContext):
pass
| [
"[email protected]"
] | |
646ef078497ec966d63ba0eeb3fa364d0ebe238c | 6aafb5d3e6204a442c0f1afad02c4ab6cbbc290f | /event/mention/models/rule_detectors.py | 728234afa512dcd4677e2d15c7f148ad2ecf8106 | [
"Apache-2.0"
] | permissive | Aditi138/DDSemantics | 22ae526d91e41b728050b6adae8847c6106b7d91 | c76010ea44b21810057c82a7ddbd1c6a3568f5f4 | refs/heads/master | 2020-03-21T11:21:48.622464 | 2018-06-24T15:39:31 | 2018-06-24T15:39:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,116 | py | import math
from event.io.ontology import OntologyLoader
class MentionDetector:
def __init__(self, **kwargs):
super().__init__()
self.unknown_type = "UNKNOWN"
def predict(self, *input):
pass
class BaseRuleDetector(MentionDetector):
def __init__(self, config, token_vocab):
super().__init__()
self.onto = OntologyLoader(config.ontology_path)
self.token_vocab = token_vocab
def predict(self, *input):
words, _, l_feature, word_meta, sent_meta = input[0]
center = math.floor(len(words) / 2)
lemmas = [features[0] for features in l_feature]
words = self.token_vocab.reveal_origin(words)
return self.predict_by_word(words, lemmas, l_feature, center)
def predict_by_word(self, words, lemmas, l_feature, center):
raise NotImplementedError('Please implement the rule detector.')
class MarkedDetector(BaseRuleDetector):
"""
Assume there is one field already marked with event type.
"""
def __init__(self, config, token_vocab, marked_field_index=-2):
super().__init__(config, token_vocab)
self.marked_field_index = marked_field_index
def predict(self, *input):
words, _, l_feature, word_meta, sent_meta = input[0]
center = math.floor(len(words) / 2)
return l_feature[center][self.marked_field_index]
class FrameMappingDetector(BaseRuleDetector):
def __init__(self, config, token_vocab):
super().__init__(config, token_vocab)
self.lex_mapping = self.load_frame_lex(config.frame_lexicon)
self.entities, self.events, self.relations = self.load_wordlist(
config.entity_list, config.event_list, config.relation_list
)
self.temp_memory = set()
def load_frame_lex(self, frame_path):
import xml.etree.ElementTree as ET
import os
ns = {'berkeley': 'http://framenet.icsi.berkeley.edu'}
lex_mapping = {}
for file in os.listdir(frame_path):
with open(os.path.join(frame_path, file)) as f:
tree = ET.parse(f)
frame = tree.getroot()
frame_name = frame.get('name')
lexemes = []
for lexUnit in frame.findall('berkeley:lexUnit', ns):
lex = lexUnit.get('name')
lexeme = lexUnit.findall(
'berkeley:lexeme', ns)[0].get('name')
lexemes.append(lexeme)
lex_text = ' '.join(lexemes)
if lex_text not in lex_mapping:
lex_mapping[lex_text] = []
lex_mapping[lex_text].append(frame_name)
return lex_mapping
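    # Illustrative frame-file shape this parser expects (simplified):
    #   <frame xmlns="http://framenet.icsi.berkeley.edu" name="Arriving">
    #     <lexUnit name="arrive.v"><lexeme name="arrive"/></lexUnit>
    #   </frame>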
def load_wordlist(self, entity_file, event_file, relation_file):
events = {}
entities = {}
relations = {}
with open(event_file) as fin:
for line in fin:
parts = line.strip().split()
if len(parts) == 2:
word, ontology = line.strip().split()
events[word] = ontology
with open(entity_file) as fin:
for line in fin:
parts = line.strip().split()
if len(parts) == 2:
word, ontology = line.strip().split()
entities[word] = ontology
with open(relation_file) as fin:
for line in fin:
parts = line.strip().split()
if parts:
event_type = parts[0]
args = parts[1:]
if event_type not in relations:
relations[event_type] = {}
for arg in args:
arg_role, arg_types = arg.split(":")
relations[event_type][arg_role] = arg_types.split(",")
return entities, events, relations
def predict_by_word(self, words, lemmas, l_feature, center):
event_types = []
center_word = words[center]
center_lemma = lemmas[center]
# if center_lemma in self.lex_mapping:
# for fname in self.lex_mapping[center_lemma]:
# event_types.append(('FrameNet', fname))
# if center_word in self.events:
# event_type = self.events[center_word]
#
# if center_lemma in self.events:
# event_type = self.events[center_lemma]
# pos_list = [features[1] for features in l_feature]
# deps = [(features[2], features[3]) for features in l_feature]
# if not event_type == self.unknown_type:
# res = self.predict_args(center, event_type, lemmas, pos_list,
# deps)
#
# for role, entity in res.items():
# if entity:
# index, entity_type = entity
# features = l_feature[index]
# args[role] = index, entity_type
return event_types
def predict_args(self, center, event_type, context, pos_list, deps):
if event_type not in self.relations:
return {}
expected_relations = self.relations[event_type]
expected_relations["Location"] = ["Loc", "GPE"]
expected_relations["Time"] = ["Time"]
filled_relations = dict([(k, None) for k in expected_relations])
num_to_fill = len(filled_relations)
relation_lookup = {}
for role, types in expected_relations.items():
for t in types:
relation_lookup[t] = role
for distance in range(1, center + 1):
left = center - distance
right = center + distance
left_lemma = context[left]
right_lemma = context[right]
if left_lemma in self.entities:
arg_type = self.check_arg(context[center], event_type,
left_lemma, deps)
if arg_type in relation_lookup:
possible_rel = relation_lookup[arg_type]
if filled_relations[possible_rel] is None:
filled_relations[possible_rel] = (left, arg_type)
num_to_fill -= 1
if right_lemma in self.entities:
arg_type = self.check_arg(context[center], event_type,
right_lemma, deps)
if arg_type in relation_lookup:
possible_rel = relation_lookup[arg_type]
if filled_relations[possible_rel] is None:
filled_relations[possible_rel] = (right, arg_type)
num_to_fill -= 1
if num_to_fill == 0:
break
return filled_relations
def check_arg(self, predicate, event_type, arg_lemma, features):
unknown_type = "O"
entity_type = unknown_type
if arg_lemma in self.entities:
entity_type = self.entities[arg_lemma]
if not entity_type == unknown_type:
return entity_type
return None
| [
"[email protected]"
] | |
cead45ed6889c6bfe7441b3f734ec81df2680bf8 | 1ec4cd1417ae392deafd52b187cae61964efc8eb | /pheasant/main.py | 098fb43f7e3ea78e4ada77ada1c8a29c771a699c | [
"MIT"
] | permissive | pkestene/pheasant | 4cdfdfeb8a0529ef4ff589698871aea8ee2d2679 | c9465f4fa1bacd7488c049882cb0663bad33feb9 | refs/heads/master | 2022-10-05T19:46:07.647962 | 2020-06-02T22:21:12 | 2020-06-02T22:21:12 | 269,784,793 | 0 | 0 | MIT | 2020-06-05T21:40:27 | 2020-06-05T21:40:26 | null | UTF-8 | Python | false | false | 6,719 | py | import os
import sys
import click
import yaml
from pheasant import __version__
from pheasant.core.page import Pages
pkg_dir = os.path.dirname(os.path.abspath(__file__))
python_version = ".".join(map(str, sys.version_info[:2]))
version_msg = f"{__version__} from {pkg_dir} (Python {python_version})."
@click.group(invoke_without_command=True)
@click.pass_context
@click.version_option(version_msg, "-V", "--version")
def cli(ctx):
if ctx.invoked_subcommand is None:
prompt()
ext_option = click.option(
"-e",
"--ext",
default="md,py",
show_default=True,
help="File extension(s) separated by commas.",
)
max_option = click.option(
"--max", default=100, show_default=True, help="Maximum number of files."
)
paths_argument = click.argument("paths", nargs=-1, type=click.Path(exists=True))
@cli.command(help="Run source files and save the caches.")
@click.option("-r", "--restart", is_flag=True, help="Restart kernel after run.")
@click.option("-s", "--shutdown", is_flag=True, help="Shutdown kernel after run.")
@click.option("-f", "--force", is_flag=True, help="Delete cache and run.")
@click.option(
"-v", "--verbose", count=True, help="Print input codes and/or outputs from kernel."
)
@ext_option
@max_option
@paths_argument
def run(paths, ext, max, restart, shutdown, force, verbose):
pages = Pages(paths, ext).collect()
length = len(pages)
click.secho(f"collected {length} files.", bold=True)
if length > max: # pragma: no cover
click.secho("Too many files. Aborted.", fg="yellow")
sys.exit()
if force:
for page in pages:
if page.has_cache:
page.cache.delete()
click.echo(page.cache.path + " was deleted.")
from pheasant.core.pheasant import Pheasant
converter = Pheasant(restart=restart, shutdown=shutdown, verbose=verbose)
converter.jupyter.safe = True
set_config(converter)
converter.convert_from_files(page.path for page in pages)
click.secho(f"{converter.log.info}", bold=True)
def set_config(converter):
if not os.path.exists("mkdocs.yml"):
return
with open("mkdocs.yml", "r") as f:
config = yaml.safe_load(f)
plugins = config.get("plugins", [])
for plugin in plugins:
if isinstance(plugin, dict) and 'pheasant' in plugin:
config = plugin['pheasant']
break
else:
return
cur_dir = config.get("cur_dir", 'page')
    config_dir = os.getcwd()
    if cur_dir == "docs":
        cur_dir = os.path.join(config_dir, 'docs')
    elif cur_dir == "config":
        cur_dir = config_dir
    sys_paths = config.get("sys_paths", [])
    sys_paths = [os.path.join(config_dir, path) for path in sys_paths]
    sys_paths = [os.path.normpath(path) for path in sys_paths]
converter.jupyter.set_config(
cur_dir=cur_dir,
sys_paths=sys_paths
)
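# Example mkdocs.yml fragment this function looks for (illustrative values,
# not taken from the pheasant documentation):
#
#   plugins:
#     - pheasant:
#         cur_dir: docs
#         sys_paths:
#           - scripts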
@cli.command(help="Convert source files to rendered Markdown.")
@click.option("-r", "--restart", is_flag=True, help="Restart kernel after run.")
@click.option("-s", "--shutdown", is_flag=True, help="Shutdown kernel after run.")
@click.option("-f", "--force", is_flag=True, help="Delete cache and run.")
@click.option(
"-v", "--verbose", count=True, help="Print input codes and/or outputs from kernel."
)
@ext_option
@max_option
@paths_argument
def convert(paths, ext, max, restart, shutdown, force, verbose):
pages = Pages(paths, ext).collect()
length = len(pages)
click.secho(f"collected {length} files.", bold=True)
if length > max: # pragma: no cover
click.secho("Too many files. Aborted.", fg="yellow")
sys.exit()
if force:
for page in pages:
if page.has_cache:
page.cache.delete()
click.echo(page.cache.path + " was deleted.")
from pheasant.core.pheasant import Pheasant
converter = Pheasant(restart=restart, shutdown=shutdown, verbose=verbose)
converter.jupyter.safe = True
outputs = converter.convert_from_files(page.path for page in pages)
for page, output in zip(pages, outputs):
path = page.path.replace(".py", ".md").replace(".md", ".out.md")
with open(path, "w", encoding="utf-8") as f:
f.write(output)
@cli.command(help="List source files.")
@ext_option
@paths_argument
def list(paths, ext):
pages = Pages(paths, ext).collect()
def size(cache):
size = cache.size / 1024
if size > 1024:
size /= 1024
return f"{size:.01f}MB"
else:
return f"{size:.01f}KB"
for page in pages:
click.echo(
("* " if page.modified else " ")
+ page.path
+ (f" (cached, {size(page.cache)})" if page.has_cache else "")
)
click.secho(f"collected {len(pages)} files.", bold=True)
@cli.command(help="Delete caches for source files.")
@click.option("-y", "--yes", is_flag=True, help="Do not ask for confirmation.")
@ext_option
@paths_argument
def clean(paths, ext, yes):
pages = Pages(paths, ext).collect()
caches = [page.cache for page in pages if page.has_cache]
if not caches:
click.secho("No cache found. Aborted.", bold=True)
sys.exit()
for cache in caches:
click.echo(cache.path)
click.secho(f"collected {len(caches)} files.", bold=True)
if not yes:
click.confirm(
"Are you sure you want to delete the caches for these files?", abort=True
)
for cache in caches:
cache.delete()
click.echo(cache.path + " was deleted.")
@cli.command(help="Python script prompt.")
def python():
prompt(script=True)
def prompt(script=False):
click.echo("Enter double blank lines to exit.")
lines = []
while True:
line = click.prompt("", type=str, default="", show_default=False)
if lines and lines[-1] == "" and line == "":
break
lines.append(line)
source = "\n".join(lines).strip() + "\n"
from markdown import Markdown
from pheasant.core.pheasant import Pheasant
converter = Pheasant()
if script:
source = converter.parse(source, "script")
output = converter.parse(source, "main")
output = converter.parse(output, "link")
click.echo("[source]")
click.echo(source.strip())
click.echo("[markdown]")
click.echo(output.strip())
click.echo("[html]")
markdown = Markdown()
html = markdown.convert(output)
click.echo(html.strip())
@cli.command(help="Serve web application.")
@click.option("--port", default=8000, show_default=True, help="Port number.")
@paths_argument
@ext_option
def serve(port, paths, ext):
from pheasant.app.app import App
app = App(paths, ext)
app.run(port=port)
| [
"[email protected]"
] | |
13873d164ca16400626328281f48e76f70139854 | 805a795ea81ca8b5cee1dec638585011da3aa12f | /MAIN/SC8/descriptionSetup8.py | a047dd05fd5ebd1065ff27bbb45d935885f0fc4f | [
"Apache-2.0"
] | permissive | josipamrsa/Interactive3DAnimation | 5b3837382eb0cc2ebdee9ee69adcee632054c00a | a4b7be78514b38fb096ced5601f25486d2a1d3a4 | refs/heads/master | 2022-10-12T05:48:20.572061 | 2019-09-26T09:50:49 | 2019-09-26T09:50:49 | 210,919,746 | 0 | 1 | Apache-2.0 | 2022-10-11T01:53:36 | 2019-09-25T19:03:51 | Python | UTF-8 | Python | false | false | 1,328 | py | import bge, bgl # Game engine i logika
import blf # Font handling
import math # Math operations
# Controller variables
cont = bge.logic.getCurrentController() # Active controller
sc = bge.logic.getCurrentScene() # Active scene
own = cont.owner # Owner
# Recalculate the rotation, translation and scaling
# of the descriptions/notifications in the scene
def recalculateRotation(obj,deg):
rec = obj.localOrientation.to_euler()
rec[1] = math.radians(deg)
obj.localOrientation = rec.to_matrix()
def rescaleObject(desc,frame):
    # Widen the frame in proportion to the text length (~7 characters per unit)
    frameScale = desc.localScale[0] * (len(desc['Text']) / 7)
    frame.localScale[0] = frameScale
def repositionObjects(desc,frame,posXYZ):
frame.localPosition = posXYZ
desc.localPosition = [posXYZ[0], posXYZ[1]*0.3, posXYZ[2]-0.8]
# Objects in the active scene
desc = sc.objects['Description4']
bubble = sc.objects['Bubble4']
msgNo = desc['msgProp']
# The message number identifies the interactive object in the scene
if msgNo == 0:
repositionObjects(desc,bubble,[-1, 1.5, 3.0])
rescaleObject(desc,bubble)
recalculateRotation(bubble,0)
elif msgNo == 1:
repositionObjects(desc,bubble,[-1, 1.5, 3.0])
rescaleObject(desc,bubble)
recalculateRotation(bubble,0)
# TODO - adapt the script for all scenes | [
"[email protected]"
] | |
f77cd4d3705f6836da20c19d301506a78fe19fc0 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2511/61132/300621.py | 30fd96a4af0f3d26bfc73805f849f6008ed799d4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | t,k = map(int,input().split())
l = []
ans = []
for j in range(t - 1):
    l.append(int(input()))
# For each start index i, find the longest even-length window whose two halves
# each sum to at most k; record 0 when no such window exists.
for i in range(len(l)):
    for le in range(len(l) - i, 1, -1):
        if le % 2 == 0 and sum(l[i:i + le // 2]) <= k and sum(l[i + le // 2:i + le]) <= k:
            ans.append(le)
            break
    else:
        ans.append(0)
for i in ans:
print(i) | [
"[email protected]"
] | |
e97fcd99d6872281fac7555bf69f72ad04b23bf8 | aa64c62a3d246b87f3f1e5810a8f75b1d166aaf6 | /paradrop/daemon/paradrop/lib/utils/addresses.py | 39e033f7b0d50df4eacfe5692f24ffa180025536 | [
"Apache-2.0"
] | permissive | ParadropLabs/Paradrop | ca40b3373c0732c781f9c10d38da9b6e9fbd3453 | c910fd5ac1d1b5e234f40f9f5592cc981e9bb5db | refs/heads/master | 2023-02-26T17:51:53.058300 | 2022-03-01T17:46:10 | 2022-03-01T17:46:10 | 37,789,450 | 88 | 31 | Apache-2.0 | 2023-02-16T05:24:46 | 2015-06-20T23:18:38 | Python | UTF-8 | Python | false | false | 4,947 | py | ###################################################################
# Copyright 2013-2014 All Rights Reserved
# Authors: The Paradrop Team
###################################################################
import socket
import struct
from paradrop.lib.utils import pdos
def isIpValid(ipaddr):
"""Return True if Valid, otherwise False."""
try:
socket.inet_aton(ipaddr)
return True
    except (socket.error, TypeError):
        return False
def isIpAvailable(ipaddr, chuteStor, name):
"""
Make sure this IP address is available.
Checks the IP addresses of all zones on all other chutes, makes sure
subnets are not the same.
"""
chList = chuteStor.getChuteList()
for ch in chList:
# Only look at other chutes
if (name != ch.name):
otherIPs = ch.getChuteIPs()
if ipaddr in otherIPs:
return False
return True
def isWifiSSIDAvailable(ssid, chuteStor, name):
"""Make sure this SSID is available."""
chList = chuteStor.getChuteList()
for ch in chList:
# Only look at other chutes
if (name != ch.name):
otherSSIDs = ch.getChuteSSIDs()
#print("chute: %s other SSIDs: %s" % (ch, otherSSIDs))
for o in otherSSIDs:
if (o == ssid):
return False
return True
def isStaticIpAvailable(ipaddr, chuteStor, name):
"""
Make sure this static IP address is available.
Checks the IP addresses of all zones on all other chutes,
makes sure not equal."""
chList = chuteStor.getChuteList()
for ch in chList:
# Only look at other chutes
if (name != ch.name):
otherStaticIPs = ch.getChuteStaticIPs()
if ipaddr in otherStaticIPs:
return False
return True
def checkPhyExists(radioid):
"""Check if this chute exists at all, a directory /sys/class/ieee80211/phyX must exist."""
#DFW: used to be this, but when netns runs this doesn't exist, so we switched to using the debug sysfs '/sys/class/ieee80211/phy%d' % radioid
return pdos.exists('/sys/kernel/debug/ieee80211/phy%d' % radioid)
def incIpaddr(ipaddr, inc=1):
"""
Takes a quad dot format IP address string and adds the @inc value to it by converting it to a number.
Returns:
Incremented quad dot IP string or None if error
"""
try:
val = struct.unpack("!I", socket.inet_aton(ipaddr))[0]
val += inc
return socket.inet_ntoa(struct.pack('!I', val))
except Exception:
return None
def maxIpaddr(ipaddr, netmask):
"""
Takes a quad dot format IP address string and makes it the largest valid value still in the same subnet.
Returns:
Max quad dot IP string or None if error
"""
try:
val = struct.unpack("!I", socket.inet_aton(ipaddr))[0]
nm = struct.unpack("!I", socket.inet_aton(netmask))[0]
inc = struct.unpack("!I", socket.inet_aton("0.0.0.254"))[0]
val &= nm
val |= inc
return socket.inet_ntoa(struct.pack('!I', val))
except Exception:
return None
def getSubnet(ipaddr, netmask):
    """Return the network address (quad dot) of @ipaddr under @netmask, or None on error."""
try:
val1 = struct.unpack("!I", socket.inet_aton(ipaddr))[0]
nm = struct.unpack("!I", socket.inet_aton(netmask))[0]
res = val1 & nm
return socket.inet_ntoa(struct.pack('!I', res))
except Exception:
return None
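# Illustrative expectations for the helpers above (not from the original file):
#   incIpaddr("192.168.1.1")                   -> "192.168.1.2"
#   maxIpaddr("192.168.1.7", "255.255.255.0")  -> "192.168.1.254"
#   getSubnet("192.168.1.7", "255.255.255.0")  -> "192.168.1.0"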
def getInternalIntfList(ch):
"""
Takes a chute object and uses the key:networkInterfaces to return a list of the internal
network interfaces that will exist in the chute (e.g., eth0, eth1, ...)
Returns:
A list of interface names
None if networkInterfaces doesn't exist or there is an error
"""
intfs = ch.getCache('networkInterfaces')
if(not intfs):
return None
l = []
for i in intfs:
l.append(i['internalIntf'])
return l
def getGatewayIntf(ch):
"""
Looks at the key:networkInterfaces for the chute and determines what the gateway should be
including the IP address and the internal interface name.
Returns:
A tuple (gatewayIP, gatewayInterface)
None if networkInterfaces doesn't exist or there is an error
"""
intfs = ch.getCache('networkInterfaces')
if(not intfs):
return (None, None)
for i in intfs:
if(i['type'] == 'wan'):
return (i['externalIpaddr'], i['internalIntf'])
return (None, None)
def getWANIntf(ch):
"""
Looks at the key:networkInterfaces for the chute and finds the WAN interface.
Returns:
The dict from networkInterfaces
None
"""
intfs = ch.getCache('networkInterfaces')
if(not intfs):
return None
for i in intfs:
if(i['type'] == 'wan'):
return i
return None
| [
"[email protected]"
] | |
754e5fbd041f370927f124f57bda6382516aab14 | dde00571d8e65208c0642f009cb1d4bc33460026 | /bigmler/tests/test_35_deepnet.py | dd683a5c4bec2ace5d8cd7e21db50ecb0164d84a | [
"Apache-2.0"
] | permissive | javs0188/bigmler | 44e5505f4751ebdfece7da87e4d4592b0da7ff51 | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | refs/heads/master | 2021-03-01T02:08:29.730986 | 2020-01-25T10:43:01 | 2020-01-25T10:43:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,217 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2017-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing deepnet predictions creation
"""
from __future__ import absolute_import
from bigmler.tests.world import (world, common_setup_module,
common_teardown_module,
teardown_class)
import bigmler.tests.basic_tst_prediction_steps as test_pred
import bigmler.tests.basic_batch_tst_prediction_steps as batch_pred
import bigmler.tests.basic_deepnet_steps as dn_pred
def setup_module():
"""Setup for the module
"""
common_setup_module()
test = TestPrediction()
test.setup_scenario02()
def teardown_module():
"""Teardown for the module
"""
common_teardown_module()
class TestPrediction(object):
def setup(self):
"""
Debug information
"""
print "\n-------------------\nTests in: %s\n" % __name__
def teardown(self):
"""Calling generic teardown for every method
"""
self.world = teardown_class()
print "\nEnd of tests in: %s\n-------------------\n" % __name__
def test_scenario01(self):
"""
Scenario: Successfully building deepnet test predictions from start with no headers:
Given I create BigML deepnet resources uploading train "<data>" file with no headers to test "<test>" with no headers and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the deepnet model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | output |predictions_file |
| ../data/iris_nh.csv | ../data/test_iris_nh.csv | ./scenario1_dn_nh/predictions.csv | ./check_files/predictions_iris_dn.csv |
"""
print self.test_scenario01.__doc__
examples = [
['data/iris_nh.csv', 'data/test_iris_nh.csv', 'scenario1_dn_nh/predictions.csv', 'check_files/predictions_iris_dn.csv']]
for example in examples:
print "\nTesting with:\n", example
dn_pred.i_create_all_dn_resources_with_no_headers(self, example[0], example[1], example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
dn_pred.i_check_create_dn_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[3])
def setup_scenario02(self):
"""
Scenario: Successfully building test predictions from start:
Given I create BigML deepnet resources uploading train "<data>" file to test "<test>" and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | output |predictions_file |
| ../data/iris.csv | ../data/test_iris.csv | ./scenario1_dn/predictions.csv | ./check_files/predictions_iris_dn.csv |
"""
print self.setup_scenario02.__doc__
examples = [
['data/iris.csv', 'data/test_iris.csv', 'scenario1_dn/predictions.csv', 'check_files/predictions_iris_dn.csv']]
for example in examples:
print "\nTesting with:\n", example
dn_pred.i_create_all_dn_resources(self, example[0], example[1], example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
dn_pred.i_check_create_dn_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[3])
def test_scenario03(self):
"""
Scenario: Successfully building test predictions from source
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML deepnet resources using source to test "<test>" and log predictions in "<output>"
And I check that the dataset has been created
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1_dn/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario2/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario03.__doc__
examples = [
['scenario1_dn', '{"data": "data/iris.csv", "output": "scenario1_dn/predictions.csv", "test": "data/test_iris.csv"}', 'data/test_iris.csv', 'scenario2_dn/predictions.csv', 'check_files/predictions_iris_dn.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
dn_pred.i_create_dn_resources_from_source(self, None, test=example[2], output=example[3])
test_pred.i_check_create_dataset(self, suffix=None)
dn_pred.i_check_create_dn_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario04(self):
"""
Scenario: Successfully building test predictions from dataset
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML deepnet resources using dataset to test "<test>" and log predictions in "<output>"
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario3/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario04.__doc__
examples = [
['scenario1_dn', '{"data": "data/iris.csv", "output": "scenario1_dn/predictions.csv", "test": "data/test_iris.csv"}', 'data/test_iris.csv', 'scenario3_dn/predictions.csv', 'check_files/predictions_iris_dn.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
dn_pred.i_create_dn_resources_from_dataset(self, None, test=example[2], output=example[3])
dn_pred.i_check_create_dn_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario05(self):
"""
Scenario: Successfully building test predictions from model
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML deepnet resources using model to test "<test>" and log predictions in "<output>"
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario4/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario05.__doc__
examples = [
['scenario1_dn', '{"data": "data/iris.csv", "output": "scenario1_dn/predictions.csv", "test": "data/test_iris.csv"}', 'data/test_iris.csv', 'scenario4_dn/predictions.csv', 'check_files/predictions_iris_dn.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
dn_pred.i_create_dn_resources_from_model(self, test=example[2], output=example[3])
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario06(self):
"""
Scenario: Successfully building batch test predictions from model
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML deepnet resources using model to test "<test>" as a batch prediction and log predictions in "<output>"
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario4/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario06.__doc__
examples = [
['scenario1_dn', '{"data": "data/iris.csv", "output": "scenario1_dn/predictions.csv", "test": "data/test_iris.csv"}', 'data/test_iris.csv', 'scenario5_dn/predictions.csv', 'check_files/predictions_iris_dn.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
dn_pred.i_create_dn_resources_from_model_remote(self, test=example[2], output=example[3])
batch_pred.i_check_create_batch_prediction(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario07(self):
"""
Scenario: Successfully building batch test predictions from model with customized output
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML deepnet resources using model to test "<test>" as a batch prediction with output format "<batch-output>" and log predictions in "<output>"
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test |batch_output | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ../data/batch_output.json | ./scenario6_dn/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario07.__doc__
examples = [
['scenario1_dn', '{"data": "data/iris.csv", "output": "scenario1_dn/predictions.csv", "test": "data/test_iris.csv"}', 'data/test_iris.csv', 'data/batch_output.json', 'scenario6_dn/predictions.csv', 'check_files/predictions_iris_dn_prob.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
dn_pred.i_create_dn_resources_from_model_remote_with_options(self, test=example[2], output=example[4], options_file=example[3])
batch_pred.i_check_create_batch_prediction(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[5])
| [
"[email protected]"
] | |
57aff09534e2ac61bab36946cf7f415e0d943545 | f24c35bb0919f9ad75f45e7906691c3189536b33 | /chengbinWorkSpace/droneLanding/python/main1.py | 006ebfa79f8a773cf8017786c92d08d5e4727681 | [] | no_license | mfkiwl/supreme-xcb | 9b941f49bab5a811d23a0cd75790d1e5722aa9f0 | d1287657607bf86d4b1393acf285951760670925 | refs/heads/main | 2023-03-07T12:10:28.288282 | 2021-03-02T11:46:00 | 2021-03-02T11:46:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,158 | py | '''
brief:
Version:
Autor: shuike
Date: 2020-12-31 17:36:47
LastEditors: shuike
LastEditTime: 2020-12-31 17:39:59
FilePath: /droneLanding/python/main1.py
'''
import numpy as np
import time
import cv2
import cv2.aruco as aruco
#with np.load('webcam_calibration_output.npz') as X:
# mtx, dist, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')]
#mtx =
#2946.48 0 1980.53
#0 2945.41 1129.25
#0 0 1
mtx = np.array([
[2946.48, 0, 1980.53],
[ 0, 2945.41, 1129.25],
[ 0, 0, 1],
])
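# Intrinsics layout: [[fx, 0, cx], [0, fy, cy], [0, 0, 1]], all in pixels.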
# When the phone photographed the calibration board the image size was 4000 x 2250.
# The IP camera records video at 1920 x 1080, the same aspect ratio;
# keep that in mind when setting the IP camera resolution.
dist = np.array( [0.226317, -1.21478, 0.00170689, -0.000334551, 1.9892] )
# video = "http://admin:[email protected]:8081/" # phone IP camera
# Change this to the IP address the camera app shows on your phone; the image
# resolution can be adjusted in the app's top-right corner.
video = 0
cap = cv2.VideoCapture(video)
font = cv2.FONT_HERSHEY_SIMPLEX #font for displaying text (below)
#num = 0
while True:
ret, frame = cap.read()
# operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
'''
detectMarkers(...)
detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedI
mgPoints]]]]) -> corners, ids, rejectedImgPoints
'''
    #lists of ids and the corners belonging to each id
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray,
aruco_dict,
parameters=parameters)
# if ids != None:
if ids is not None:
rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, 0.05, mtx, dist)
        # Estimate pose of each marker and return the values rvec and tvec---different
        # from camera coefficients
(rvec-tvec).any() # get rid of that nasty numpy value array error
# aruco.drawAxis(frame, mtx, dist, rvec, tvec, 0.1) #Draw Axis
# aruco.drawDetectedMarkers(frame, corners) #Draw A square around the markers
for i in range(rvec.shape[0]):
aruco.drawAxis(frame, mtx, dist, rvec[i, :, :], tvec[i, :, :], 0.03)
aruco.drawDetectedMarkers(frame, corners)
###### DRAW ID #####
# cv2.putText(frame, "Id: " + str(ids), (0,64), font, 1, (0,255,0),2,cv2.LINE_AA)
else:
##### DRAW "NO IDS" #####
cv2.putText(frame, "No Ids", (0,64), font, 1, (0,255,0),2,cv2.LINE_AA)
# Display the resulting frame
cv2.imshow("frame",frame)
key = cv2.waitKey(30)
    if key == 27: # press Esc to exit
print('esc break...')
cap.release()
cv2.destroyAllWindows()
break
    if key == ord(' '): # press Space to save
        # num = num + 1
        # filename = "frames_%s.jpg" % num # save a single image
filename = str(time.time())[:10] + ".jpg"
cv2.imwrite(filename, frame) | [
"[email protected]"
] | |
1bba11a9a7d0dd0ee37d1de55d8acf5bf381a26f | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/projectdiscovery/client/const.py | ad0cb4b6c30e1654050e4db0e6c4dbd6ccfdb876 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 3,862 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\projectdiscovery\client\const.py
import os
RESROOT = os.path.dirname(__file__) + '\\res\\'
class Texts:
SelectionsLabel = 'Selections'
TaskImageLabel = 'Jovian Tissue Sample'
RankLabel = 'Rank: '
ScoreLabel = ' Discovery Credits'
SubmitButtonLabel = 'Submit'
PreviousGroupButtonLabel = 'Previous Group'
NextGroupButtonLabel = 'Next Group'
class Events:
PlayTutorial = 'OnCitizenSciencePlayTutorial'
NewTask = 'OnCitizenScienceNewTask'
NewTrainingTask = 'OnCitizenScienceNewTrainingTask'
CategoryChanged = 'OnCitizenScienceCategoryChanged'
ExcludeCategories = 'OnCitizenScienceExcludeCategories'
SubmitSolution = 'OnCitizenScienceSubmitSolution'
ResultReceived = 'OnCitizenScienceResultReceived'
TrainingResultReceived = 'OnCitizenScienceTrainingResultReceived'
ContinueFromResult = 'OnCitizenScienceContinueFromResult'
ContinueFromTrainingResult = 'OnCitizenScienceContinueFromTrainingResult'
UpdateScoreBar = 'OnCitizenScienceUpdateScoreBar'
ProjectDiscoveryStarted = 'OnProjectDiscoveryStarted'
ContinueFromReward = 'OnProjectDiscoveryContinueFromReward'
StartTutorial = 'OnProjectDiscoveryStartTutorial'
QuitTutorial = 'OnProjectDiscoveryQuitTutorial'
CloseResult = 'OnProjectDiscoveryResultClosed'
HoverMainImage = 'OnHoverMainImage'
ClickMainImage = 'OnClickMainImage'
ProcessingViewFinished = 'OnProcessingViewFinished'
MouseExitMainImage = 'OnMouseExitMainImage'
MouseEnterMainImage = 'OnMouseEnterMainImage'
TransmissionFinished = 'OnTransmissionFinished'
RewardViewFadeOut = 'OnRewardViewFadeOut'
MainImageLoaded = 'OnMainImageLoaded'
EnableUI = 'OnUIEnabled'
class Settings:
ProjectDiscoveryTutorialPlayed = 'ProjectDiscoveryTutorialPlayed'
ProjectDiscoveryIntroductionShown = 'ProjectDiscoveryIntroductionShown'
class Sounds:
CategorySelectPlay = 'wise:/project_discovery_category_select_play'
MainImageLoadPlay = 'wise:/project_discovery_main_image_calculation_loop_play'
MainImageLoadStop = 'wise:/project_discovery_main_image_calculation_loop_stop'
MainImageLoopPlay = 'wise:/project_discovery_main_image_loop_play'
MainImageLoopStop = 'wise:/project_discovery_main_image_loop_stop'
MainImageOpenPlay = 'wise:/project_discovery_main_image_open_play'
ColorSelectPlay = 'wise:/project_discovery_color_select_play'
ProcessingPlay = 'wise:/project_discovery_analysis_calculation_loop_play'
ProcessingStop = 'wise:/project_discovery_analysis_calculation_loop_stop'
RewardsWindowOpenPlay = 'wise:/project_discovery_analysis_window_open_play'
RewardsWindowClosePlay = 'wise:/project_discovery_analysis_window_close_play'
RewardsWindowLoopPlay = 'wise:/project_discovery_analysis_window_loop_play'
RewardsWindowLoopStop = 'wise:/project_discovery_analysis_window_loop_stop'
AnalysisDonePlay = 'wise:/project_discovery_analysis_done_play'
AnalysisWindowMovePlay = 'wise:/project_discovery_analysis_window_move_play'
TrainingTasks = {'starter': {160000016: [102],
160000017: [211],
160000005: [303]},
'levelOne': {160000009: [233],
160000001: [203],
160000011: [221]},
'levelTwo': {160000014: [123],
160000003: [112],
160000007: [113]},
'levelThree': {160000010: [102, 233],
160000006: [121, 203],
160000000: [102, 201]},
'levelFour': {160000012: [303, 201]},
'levelFive': {160000008: [102, 303, 302],
160000015: [102, 215]},
'negative': {160000002: [901],
160000004: [101],
160000013: [201]},
'unspecific': {}}
| [
"[email protected]"
] | |
7e0ba785a515715cb0fc0bc4a2a9e33058df31f5 | 682581de9e3674d157877756d1a536f5b028c045 | /script/Analysis3b_NW.py | 581494f743f8299a4ad5ac878e03fb7d41090989 | [] | no_license | wchnicholas/ProteinGFourMutants | 3a81b9175e0e5bb864d5723fa59443a3ba07eda6 | dbdd7639187e0b8f22359f404ce4d1d950fcc8a9 | refs/heads/master | 2023-08-16T19:53:02.475407 | 2023-08-03T12:24:44 | 2023-08-03T12:24:44 | 33,599,807 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,443 | py | #!/usr/bin/python
import os
import sys
import operator
import networkx as nx
import numpy as np
from itertools import imap
def all_shortest_paths(G, source, target, weight=None):
if weight is not None:
pred,dist = nx.dijkstra_predecessor_and_distance(G,source,weight=weight)
else:
pred = nx.predecessor(G,source)
if target not in pred:
raise nx.NetworkXNoPath()
stack = [[target,0]]
top = 0
while top >= 0:
node,i = stack[top]
if node == source:
yield [p for p,n in reversed(stack[:top+1])]
if len(pred[node]) > i:
top += 1
if top == len(stack):
stack.append([pred[node][i],0])
else:
stack[top] = [pred[node][i],0]
else:
stack[top-1][1] += 1
top -= 1
def hamming(str1, str2):
assert len(str1) == len(str2)
return sum(imap(operator.ne, str1, str2))
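# e.g. hamming("VDGV", "VDGA") == 1 -- exactly one mismatched position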
def TsvWithHeader2Hash(fitfile):
H = {}
infile = open(fitfile,'r')
countline = 0
header = []
for line in infile.xreadlines():
countline += 1
line = line.rstrip().rsplit("\t")
if countline == 1: header = line; continue
mut = line[0]
H[mut] = {}
for i in range(1,len(line)): H[mut][header[i]] = line[i]
infile.close()
return H
def filterfithash(fithash, condition):
for mut in fithash.keys():
if fithash[mut][condition] == 'NA' or '_' in mut:
del fithash[mut]
return fithash
def generatenodes(mut,WT,fithash):
pathhash = {}
nodes = []
for i in range(4): pathhash[i] = [WT[i],mut[i]]
for i in range(16):
index = str(bin(i)).rsplit('b')[1]
index = ''.join(map(str,[0]*(4-len(index))))+index
node = ''
for p in range(len(index)):
node+=pathhash[p][int(index[p])]
if fithash.has_key(node): nodes.append(node)
else: nodes.append('NA')
return nodes
def buildgraph(nodes):
G = nx.Graph()
[G.add_node(node) for node in nodes]
for n1 in nodes:
for n2 in nodes:
if hamming(n1,n2) == 1: G.add_edge(n1,n2)
return G
def removenodes(nodes, fithash, cutoff, condition):
cleannodes = []
for node in nodes:
if float(fithash[node][condition]) >= cutoff:
cleannodes.append(node)
return cleannodes
def stucking(path,fithash,condition,mut,WT):
mutfit = float(fithash[mut][condition])
wtfit = float(fithash[WT][condition])
for step in path:
stepfit = float(fithash[step][condition])
if step != WT and step != mut and stepfit > mutfit and stepfit > wtfit: return 1
return 0
def monoincr(path,fithash,condition,mut,WT):
for n in range(1,len(path)):
stepPrev = path[n-1]
stepCurr = path[n]
if float(fithash[stepPrev][condition]) > float(fithash[stepCurr][condition]): return 0
return 1
def localmaxing(G,WT,fithash,condition):
localmaxs = []
for node in G.nodes():
fit = float(fithash[node][condition])
neighfits = []
[neighfits.append(float(fithash[neigh][condition])) for neigh in G.neighbors(node)]
if max(neighfits) < fit: localmaxs.append(node)
return localmaxs
def pathwayanalysis(fithash,muts,WT,condition,outfile):
print 'Pathway analysis started'
for mut in muts:
HD = int(fithash[mut]['HD'])
mutfit = float(fithash[mut][condition])
if HD == 4 and mutfit < 1 and mutfit > 0.01:
nodes = generatenodes(WT,mut,fithash)
if 'NA' in nodes: continue
G = buildgraph(nodes)
localmaxs = localmaxing(G,WT,fithash,condition)
if len(localmaxs) != 1 or localmaxs[0] != WT: continue
assert(max([float(fithash[node][condition]) for node in nodes])==1)
paths = all_shortest_paths(G,mut,WT)
monoIpath = 0
for path in paths:
assert(len(path)==5)
#if float(fithash[path[1]][condition]) <= 0.01: continue #Filter first step fitness
monoIpath+=monoincr(path,fithash,condition,mut,WT)
outfile.write("\t".join(map(str,[condition, mut, mutfit, len(nodes), monoIpath]))+"\n")
def main():
WT = 'VDGV'
fitfile = 'result/Mutfit'
outfile = 'analysis/ShortMonoPaths4ToWT'
condition = 'I20fit'
fithash = TsvWithHeader2Hash(fitfile)
fithash = filterfithash(fithash, condition)
muts = fithash.keys()
outfile = open(outfile,'w')
outfile.write("Condition\tMut\tMutfit\tNodes\tMonoIPath\n")
pathwayanalysis(fithash,muts,WT,condition,outfile)
outfile.close()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f11e1e87cc797779c10501b1ba6580ef22410b8b | 4f72e1235675562838c84dc7bfc7c97b9139dfd3 | /src/tmdp/programs/pomdp_list/mdp_builder.py | 2214c2518a806b47b79f8552b6517a072fe09599 | [] | no_license | AndreaCensi/tmdp | dcc590cacd5f146b08c6dced132ca601554597f6 | 28d3103eedcc9370e56c96fff8004b8320b90c43 | refs/heads/master | 2021-01-12T19:24:46.987223 | 2014-06-10T01:39:39 | 2014-06-10T01:39:39 | 16,900,103 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,565 | py | from collections import defaultdict
from contracts import contract
from tmdp.sampled_mdp import SampledMDP
__all__ = ['MDPBuilder']
class MDPBuilder():
@contract(start_dist='ddist')
def __init__(self, start_dist):
self._state2id = {}
self._id2state = {}
self._transitions = []
self.goals = set()
self._start_dist = {}
for state, p_state in start_dist.items():
self._start_dist[self._get_id(state)] = float(p_state)
def _get_id(self, state):
if state in self._state2id:
return self._state2id[state]
# print('not creating different IDs')
# l = len(self._state2id)
l = state
self._state2id[state] = l
self._id2state[l] = state
return l
def mark_goal(self, state):
self.goals.add(self._get_id(state))
def add_state(self, state):
pass
def add_transition(self, state, action, state2, probability, reward):
s1 = self._get_id(state)
s2 = self._get_id(state2)
t = (s1, action, s2, probability, reward)
self._transitions.append(t)
def get_sampled_mdp(self, goal_absorbing=True, stay=None):
"""
If goal_absorbing = True, all actions of the goal states
give the same state.
"""
state2actions = defaultdict(lambda: set())
state2action2transition = defaultdict(lambda: defaultdict(lambda:{}))
state2action2state2reward = \
defaultdict(lambda: defaultdict(lambda: defaultdict(lambda:{}))) # xXx: should be 0
# create states self.states = set()
states = set()
for s1 in self._id2state:
states.add(s1)
actions = set()
for (s1, action, s2, p, reward) in self._transitions:
actions.add(action)
state2actions[s1].add(action)
state2action2transition[s1][action][s2] = float(p) # += p
state2action2state2reward[s1][action][s2] = reward # += p
if goal_absorbing:
if False:
for g in self.goals:
for a in actions:
state2actions[g].add(a)
# print('%10s %s' % (reward, (s1, action, s2)))
state2action2transition[g][a][g] = 1.0
# print ('setting %s to 0 instead of %s' %
# ((g, a, g), state2action2state2reward[g][a][g]))
state2action2state2reward[g][a][g] = 0.0
else:
# for each goal state, we reset to the start distribution
assert isinstance(stay, float)
for g in self.goals:
for a in actions:
state2actions[g].add(a)
state2action2transition[g][a] = {}
state2action2transition[g][a][g] = stay
state2action2state2reward[g][a][g] = 0.0
for s0 in self._start_dist:
x = self._start_dist[s0] * (1.0 - stay)
state2action2transition[g][a][s0] = x
state2action2state2reward[g][a][s0] = 0.0
mdp = SampledMDP(states=states,
state2actions=state2actions,
state2action2transition=state2action2transition,
state2action2state2reward=state2action2state2reward,
start_dist=self._start_dist,
goals=self.goals)
return mdp
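# Minimal usage sketch (hypothetical states, action and reward, not from this
# module's tests):
#   builder = MDPBuilder(start_dist={'s0': 1.0})
#   builder.add_transition('s0', 'go', 'goal', probability=1.0, reward=1.0)
#   builder.mark_goal('goal')
#   mdp = builder.get_sampled_mdp(goal_absorbing=True, stay=0.9)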
| [
"[email protected]"
] | |
f5d2df5f01ced18550aabed0aec4b75a5f07bb56 | 97f285b6f8016a8d1d2d675fffb771df3c9e37b9 | /study/checkio/Electronic Station/largest_histogram.py | 55dad59f2bd43ea949dc828d939a716654cfe3ff | [] | no_license | oskomorokhov/python | ef5408499840465d18852954aee9de460d0e7250 | 8909396c4200bd2fca19d3f216ed5f484fb2192a | refs/heads/master | 2021-05-14T09:27:25.413163 | 2019-12-12T21:00:05 | 2019-12-12T21:00:05 | 116,327,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | #!/usr/bin/env checkio --domain=py run largest-histogram
# https://py.checkio.org/mission/largest-histogram/
# "Your power to choose can never be taken from you.
# It can be neglected and it can be ignored.
# But if used, it can make all the difference."
# ― Steve Goodier
#
# You have a histogram. Try to find size of the biggest rectangle you can build out of the histogram bars.
#
# Input:List of all rectangles heights in histogram
#
# Output:Area of the biggest rectangle
#
# Example:
#
#
# largest_histogram([5]) == 5
# largest_histogram([5, 3]) == 6
# largest_histogram([1, 1, 4, 1]) == 4
# largest_histogram([1, 1, 3, 1]) == 4
# largest_histogram([2, 1, 4, 5, 1, 3, 3]) == 8
# How it is used:There is no way the solution you come up with will be any useful in a real life. Just have some fun here.
#
# Precondition:
# 0 < len(data) < 1000
#
#
#
# END_DESC
def largest_histogram(histogram):
if len(histogram) == 1:
return histogram[0]
if len(set(histogram)) == 1:
return sum(histogram)
    # For each height threshold v, collect the lengths of maximal runs of bars
    # at least v tall; the answer is the best v * run_length product.
    runs = []
    for v_offset in range(1, max(histogram) + 1):
        count = 0
        for b in histogram:
            if b >= v_offset:
                count += 1
            else:
                runs.append((v_offset, count))
                count = 0
        runs.append((v_offset, count))  # flush the trailing run
    return max(v * run for v, run in runs)
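# A sketch of the classic O(n) monotonic-stack alternative (illustrative,
# not part of the original mission solution):
def largest_histogram_stack(histogram):
    stack = []  # (start_index, height) pairs, heights strictly increasing
    best = 0
    for i, h in enumerate(histogram + [0]):  # trailing 0 flushes the stack
        start = i
        while stack and stack[-1][1] >= h:
            start, height = stack.pop()
            best = max(best, height * (i - start))  # bar spans start..i-1
        stack.append((start, h))
    return best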
if __name__ == "__main__":
# These "asserts" using only for self-checking and not necessary for auto-testing
#assert largest_histogram([5]) == 5, "one is always the biggest"
#assert largest_histogram([5, 3]) == 6, "two are smallest X 2"
#assert largest_histogram([1, 1, 4, 1]) == 4, "vertical"
#assert largest_histogram([1, 1, 3, 1]) == 4, "horizontal"
#assert largest_histogram([2, 1, 4, 5, 1, 3, 3]) == 8, "complex"
#assert largest_histogram([0,1,2,3,4,5,6,7,8,9,8,7,6,5,4,3,2,1,0]) == False
assert largest_histogram([2, 1, 4, 5, 1, 3, 3]) == 8
print("Done! Go check it!")
| [
"[email protected]"
] | |
4016ca30ee6ed155a0c52f3a9303bb746026353c | 4193a2c27d55bb9069680d624cbc223d9d2f5c0d | /gqlshop/gqlshop/wsgi.py | 16f47a7aa220f67fe8b8ae6fb19e9844dca971a5 | [] | no_license | r2d2-lex/graphql-example | fa517fb7daf9d6a9d779671a608be8980a5a8804 | 4405b70ccf171cb04e0cd4728c782be20b25e552 | refs/heads/master | 2022-12-23T23:44:28.325216 | 2020-10-07T19:14:03 | 2020-10-07T19:14:03 | 291,699,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for gqlshop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gqlshop.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
3390e4dc6858eccf14bf91fb2b5b5710b7112ae2 | 8b23c09e4f0220a57273845a505c4f52a32c035f | /gallery/photogall/views.py | 600419b8c721e57e67247ee696bbc5d2a3038c21 | [] | no_license | MaryMbugua/PhotoGallery | ca89131211d55e1ad5cd5adb104605c76de2afc7 | 1c3a75c575d91a764c099092192f1c365a92899d | refs/heads/master | 2020-03-15T12:32:48.993521 | 2018-05-10T09:57:14 | 2018-05-10T09:57:14 | 132,146,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from django.shortcuts import render
from django.http import HttpResponse,Http404,HttpResponseRedirect
from .models import Location,Category,Image
# Create your views here.
def landing(request):
images = Image.get_image_by_id()
return render(request,'index.html',{"images":images}) | [
"[email protected]"
] | |
30fa2ca302469ae0002ea5b346973437ab8a20b7 | b05b12abb6116440ded3862214c09d7ec7d007d7 | /pin_is_touched.py | f71628c449def5e2bcf685a7f71810a97b9367ac | [] | no_license | cirosantilli/bbc-microbit-cheat | 22713af05173757f0283316d53023f1db21e07cc | 4e503a5e973851b8fe065fcb86f6be4731fee2cd | refs/heads/master | 2021-07-16T04:42:39.000814 | 2016-10-24T21:44:09 | 2016-10-24T21:44:09 | 71,735,591 | 0 | 2 | null | 2021-02-24T16:33:55 | 2016-10-23T23:21:53 | Python | UTF-8 | Python | false | false | 327 | py | """
Check which electric connector pins are touched.
You must also touch the GND pin at the same time to see anything.
"""
from microbit import *
while True:
val = 0
if pin0.is_touched():
val += 1
if pin1.is_touched():
val += 2
if pin2.is_touched():
val += 4
display.show(str(val))
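    # Example: touching GND together with pin0 and pin2 shows "5" (1 + 4).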
| [
"[email protected]"
] | |
6a240756c57303c77f96f2c2d6e05dec159770f6 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /6_tree/前缀树trie/好题/Ghost.py | 2dc01dcb317b3e2726dc22fdb00ccba0a1a2dc4e | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | from collections import defaultdict
from typing import List
# n ≤ 10,000 where n is the length of dictionary.
# Players take turns appending letters; the running string must stay a prefix
# of some dictionary word, and whoever completes a full word loses.
# Question: with both players playing optimally, does the first player win?
class Solution:
def solve(self, words: List[str]) -> int:
def dfs(cur) -> bool:
if "#" in cur:
return False
            return not any(dfs(node) for node in cur.values())  # no reply lets the opponent win
Trie = lambda: defaultdict(Trie)
root = Trie()
for w in words:
cur = root
for c in w:
cur = cur[c]
cur = cur["#"]
return any(dfs(node) for node in root.values())
print(Solution().solve(words=["ghost", "ghostbuster", "gas"]))
# Here is a sample game when dictionary is ["ghost", "ghostbuster", "gas"]:
# Player 1: "g"
# Player 2: "h"
# Player 1: "o"
# Player 2: "s"
# Player 1: "t" [loses]
# If player 2 had chosen "a" as the second letter, player 1 would still lose since they'd be forced to write the last letter "s".
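# Reading dfs: dfs(node) is True when the player who just moved to `node` wins,
# i.e. every possible reply leaves the opponent in a losing position; a node
# containing "#" means that player has just completed a word, so they lose.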
| [
"[email protected]"
] | |
13e1b504d533a70e45dfe026bb832bb026f4a5dd | 38a028123c2b49590428f1369dae80c5f68d8f18 | /pike/test_util.py | 42609291af5f33efbf6b2f4ad1663dcceba09251 | [
"MIT"
] | permissive | stevearc/python-pike | 2412969768065b05fbde47abb68d281649382725 | 588fd5fae4da4a69f355279df10a3b4a41078ea2 | refs/heads/master | 2016-09-05T15:01:02.274490 | 2014-05-30T08:09:58 | 2014-05-30T08:09:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,128 | py | """ Tests for pike.util """
import six
import os
from .test import BaseFileTest
from pike import util, sqlitedict
class TestFileMatch(BaseFileTest):
""" Tests for recursive file glob matching """
def setUp(self):
super(TestFileMatch, self).setUp()
self.make_files(
'app.js',
'widget.js',
'common/util.js',
'common/api.js',
'shop/models.js',
'shop/util.js',
)
def test_prefix(self):
""" Can select files limited by directory prefix """
results = util.recursive_glob(self.tempdir, '*', 'common')
self.assertItemsEqual(results, ['common/util.js', 'common/api.js'])
def test_prefix_in_glob(self):
""" Can embed a prefix inside the search glob """
results = util.recursive_glob(self.tempdir, 'common/*')
self.assertItemsEqual(results, ['common/util.js', 'common/api.js'])
def test_prefix_and_invert(self):
""" Can both invert the match and provide a prefix """
results = util.recursive_glob(self.tempdir, '*.js:!common/*:!shop/*')
self.assertItemsEqual(results, ['app.js', 'widget.js'])
def test_match(self):
""" Globs match filenames """
results = util.recursive_glob(self.tempdir, 'util.js')
self.assertItemsEqual(results, ['common/util.js', 'shop/util.js'])
def test_pathsep(self):
""" Patterns can be separated by a ':' """
results = util.recursive_glob(self.tempdir, 'app.js:widget.js')
self.assertEquals(results, ['app.js', 'widget.js'])
def test_pattern_list(self):
""" Patterns can be provided as a list """
results = util.recursive_glob(self.tempdir, ['app.js', 'widget.js'])
self.assertEquals(results, ['app.js', 'widget.js'])
def test_invert_match(self):
""" Prefixing a glob with ! will remove matching elements """
results = util.recursive_glob(self.tempdir, 'app.js:!*.js')
self.assertEquals(results, [])
def test_dedupe(self):
""" Results should not contain duplicates """
results = util.recursive_glob(self.tempdir, 'app.js:app.js')
self.assertEquals(results, ['app.js'])
class TestSqliteDict(BaseFileTest):
""" Tests for sqlitedict """
def test_sqlitedict(self):
""" Run a bunch of tests on sqlitedicts """
with sqlitedict.open() as d:
self.assertEqual(list(d), [])
self.assertEqual(len(d), 0)
self.assertFalse(d)
d['abc'] = 'rsvp' * 100
self.assertEqual(d['abc'], 'rsvp' * 100)
self.assertEqual(len(d), 1)
d['abc'] = 'lmno'
self.assertEqual(d['abc'], 'lmno')
self.assertEqual(len(d), 1)
del d['abc']
self.assertFalse(d)
self.assertEqual(len(d), 0)
d['abc'] = 'lmno'
d['xyz'] = 'pdq'
self.assertEqual(len(d), 2)
self.assertItemsEqual(list(six.iteritems(d)), [('abc', 'lmno'),
('xyz', 'pdq')])
self.assertItemsEqual(d.items(), [('abc', 'lmno'), ('xyz', 'pdq')])
self.assertItemsEqual(d.values(), ['lmno', 'pdq'])
self.assertItemsEqual(d.keys(), ['abc', 'xyz'])
self.assertItemsEqual(list(d), ['abc', 'xyz'])
d.update(p='x', q='y', r='z')
self.assertEqual(len(d), 5)
self.assertItemsEqual(d.items(),
[('abc', 'lmno'), ('xyz', 'pdq'),
('q', 'y'), ('p', 'x'), ('r', 'z')])
del d['abc']
try:
d['abc']
except KeyError:
pass
else:
assert False
try:
del d['abc']
except KeyError:
pass
else:
assert False
self.assertItemsEqual(list(d), ['xyz', 'q', 'p', 'r'])
self.assertTrue(d)
d.clear()
self.assertFalse(d)
self.assertEqual(list(d), [])
d.update(p='x', q='y', r='z')
self.assertItemsEqual(list(d), ['q', 'p', 'r'])
d.clear()
self.assertFalse(d)
def test_file_persistence(self):
""" Dict should be saved to a file """
with sqlitedict.open('test.db') as d:
d['abc'] = 'def'
with sqlitedict.open('test.db') as d:
self.assertEqual(d['abc'], 'def')
def test_flag_n(self):
""" The 'n' flag will clear an existing database """
with sqlitedict.open('test.db', flag='n') as d:
d['abc'] = 'def'
with sqlitedict.open('test.db', flag='n') as d:
self.assertFalse(d)
def test_flag_w(self):
""" The 'w' flag will clear existing table """
with sqlitedict.open('test.db', 'a') as d:
d['abc'] = 'def'
with sqlitedict.open('test.db', 'b') as d:
d['abc'] = 'def'
with sqlitedict.open('test.db', 'a', flag='w') as d:
self.assertFalse(d)
with sqlitedict.open('test.db', 'b') as d:
self.assertEqual(d['abc'], 'def')
def test_bad_flag(self):
""" Passing a bad flag raises error """
with self.assertRaises(ValueError):
sqlitedict.open(flag='g')
def test_memory(self):
""" in-memory databases do not create files """
with sqlitedict.open() as d:
d['abc'] = 'def'
self.assertEqual(os.listdir(os.curdir), [])
self.assertEqual(os.listdir(os.curdir), [])
def test_autocommit(self):
""" When autocommit=True the db is automatically updated """
d1 = sqlitedict.open('test.db', autocommit=True)
d2 = sqlitedict.open('test.db', autocommit=True)
d1['abc'] = 'def'
self.assertEqual(d2['abc'], 'def')
def test_no_autocommit(self):
""" When autocommit=False the db is updated when commit() is called """
d1 = sqlitedict.open('test.db', autocommit=False)
d2 = sqlitedict.open('test.db', autocommit=False)
d1['abc'] = 'def'
self.assertFalse(d2)
d1.commit()
self.assertEqual(d2['abc'], 'def')
def test_commit_after_close(self):
""" Calling commit() after closing sqlitedict raises error """
d = sqlitedict.open()
d.close()
with self.assertRaises(IOError):
d.commit()
def test_close_calls_commit(self):
""" If autocommit=False, closing sqlitedict automatically commits """
d1 = sqlitedict.open('test.db', autocommit=False)
d2 = sqlitedict.open('test.db', autocommit=False)
d1['abc'] = 'def'
self.assertFalse(d2)
d1.close()
self.assertEqual(d2['abc'], 'def')
def test_terminate(self):
""" Calling terminate() removes database file """
with sqlitedict.open('test.db') as d:
d['abc'] = 'def'
d.terminate()
self.assertFalse(os.path.exists('test.db'))
| [
"[email protected]"
] | |
6dec4b41ccfe3eeec1c52bc3d20b4dd04b5ba518 | 1bf9f6b0ef85b6ccad8cb029703f89039f74cedc | /src/databox/azext_databox/_validators.py | a4339304141cb13037530ae3fa80a48739c595c6 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | VSChina/azure-cli-extensions | a1f4bf2ea4dc1b507618617e299263ad45213add | 10b7bfef62cb080c74b1d59aadc4286bd9406841 | refs/heads/master | 2022-11-14T03:40:26.009692 | 2022-11-09T01:09:53 | 2022-11-09T01:09:53 | 199,810,654 | 4 | 2 | MIT | 2020-07-13T05:51:27 | 2019-07-31T08:10:50 | Python | UTF-8 | Python | false | false | 3,935 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import resource_id
def validate_create_input_parameters(cmd, namespace):
_parse_storage_account_details(cmd, namespace)
_parse_managed_disk_details(cmd, namespace)
_validate_expected_data_size_for_databoxdisk(namespace)
_validate_destination_account_details(namespace)
def _parse_storage_account_details(cmd, namespace):
"""Parse storage account details for destination."""
from msrestazure.tools import is_valid_resource_id
if not namespace.destination_account_details:
namespace.destination_account_details = []
if namespace.storage_accounts:
for storage_account in namespace.storage_accounts:
if storage_account and not is_valid_resource_id(storage_account):
storage_account = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Storage',
type='storageAccounts',
name=storage_account
)
if storage_account:
storage_account_details = {'storage_account_id': storage_account,
'data_destination_type': 'StorageAccount'}
namespace.destination_account_details.append(storage_account_details)
del namespace.storage_accounts
def _parse_managed_disk_details(cmd, namespace):
"""Parse managed disk details for destination."""
from msrestazure.tools import is_valid_resource_id
if not namespace.destination_account_details:
namespace.destination_account_details = []
subscription = get_subscription_id(cmd.cli_ctx)
if namespace.staging_storage_account and not is_valid_resource_id(namespace.staging_storage_account):
namespace.staging_storage_account = resource_id(
subscription=subscription,
resource_group=namespace.resource_group_name,
namespace='Microsoft.Storage',
type='storageAccounts',
name=namespace.staging_storage_account
)
if namespace.resource_group_for_managed_disk and not is_valid_resource_id(
namespace.resource_group_for_managed_disk):
namespace.resource_group_for_managed_disk = '/subscriptions/' + subscription + '/resourceGroups/' + namespace.resource_group_for_managed_disk
if namespace.staging_storage_account and namespace.resource_group_for_managed_disk:
managed_disk_details = {'staging_storage_account_id': namespace.staging_storage_account,
'resource_group_id': namespace.resource_group_for_managed_disk,
'data_destination_type': 'ManagedDisk'}
namespace.destination_account_details.append(managed_disk_details)
del namespace.staging_storage_account
del namespace.resource_group_for_managed_disk
def _validate_expected_data_size_for_databoxdisk(namespace):
if namespace.sku == 'DataBoxDisk' and not namespace.expected_data_size:
raise ValueError(
"You must provide '--expected-data-size' when the 'sku' is 'DataBoxDisk'.")
def _validate_destination_account_details(namespace):
if not namespace.destination_account_details:
raise ValueError(
"You must provide at least one '--storage-account' or the combination of '--staging-storage-account' and "
"'--resource-group-for-managed-disk'")
| [
"[email protected]"
] | |
4122aa86f674ce7e19064a2e472a1fb7a4326c1a | 359f3d8a1a2b5524490c314a44d60cec1d06f658 | /whoweb/campaigns/events.py | 767989d855abd54f18cae6be04b6b6fcbe3b9002 | [] | no_license | sivasuriyangithub/Merket_Intellect-s3.route | ec9d9aa7d4575d5ff8006e1454f69e4033193fc0 | 71a9ab642f9a31f4a318cebec7fe6a075870a83c | refs/heads/master | 2023-08-25T13:51:02.116705 | 2021-10-19T01:06:49 | 2021-10-19T01:06:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | PUBLISH_CAMPAIGN = 100, "Publishing Campaign."
PUBLISH_DRIP_CAMPAIGN = 150, "Publishing Drip Campaign."
PAUSE_CAMPAIGN = 300, "Pausing Campaign."
RESUME_CAMPAIGN = 150, "Resuming Campaign."
CAMPAIGN_SIGNATURES = 110, "Generated signatures for publishing campaign manager."
ENQUEUED_FROM_ADMIN = 120, "Enqueued from Admin."
| [
"[email protected]"
] | |
fad6d9954e6d45a6f1f6d6b53e441cbbf1dfdbd2 | a6bb71f951c104ea9a3eb9d7a4ab413f45e84c5b | /aula48-ex01.py | af20f27cc7c9fed8872d8d98839f46245e52fe40 | [] | no_license | romulorm/udemy-python | 7e15621014c124190c18f3e67d6f6fa1f400e4d1 | 4a03ae860b9febad9f04bba0aabda211ccb0169d | refs/heads/master | 2021-05-24T15:07:01.113977 | 2021-03-13T19:28:32 | 2021-03-13T19:28:32 | 253,619,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | '''
Create a function that displays a greeting, taking the parameters "saudação" (greeting) and "nome" (name).
'''
def saudacao(msg='Olá', nome='usuário'):
print(f'{msg}, {nome}.')
# MAIN PROGRAM
saudacao('Boa noite', 'Leandro')
saudacao('Boa tarde', 'Maria')
saudacao('Hey', 'Joe')
| [
"[email protected]"
] | |
2a697457b8a54b77ebffa2225054966f1aafc36f | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4041/434004041.py | 58a9642917b1bb82347f7247422a5ead994c4d21 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 558 | py | from bots.botsconfig import *
from records004041 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'RJ',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BRR', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 10},
{ID: 'N1', MIN: 1, MAX: 25, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 1},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
671ef21cb1eabf8b44e4076470681c54059aab6a | cc3f6702388ee8e2f59ade79d1a09b5977934c5c | /run.py | 4e3bd6c6c0ec3bdc0407c415ae3ec0feeffddad1 | [] | no_license | tonyguesswho/fsk | b95024ce90a81a0e1618b423e72c91306ebcd425 | 18de44b37cf5ee7976029d7b2d97af3108813279 | refs/heads/master | 2020-06-01T10:33:26.570197 | 2019-06-07T15:12:19 | 2019-06-07T15:12:19 | 190,750,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from app import app
from db import db
db.init_app(app)
@app.before_first_request
def create_tables():
    db.create_all()
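# Minimal sketch of a local entry point (an assumption: the built-in Flask dev
# server should serve the app when this module is run directly; a production
# setup would instead point a WSGI server at `app`).
if __name__ == '__main__':
    app.run() | [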
"[email protected]"
] | |
4628f1a901997ee1e00f6288b3b18bed3cca60c5 | 9172bc5c472a89e62444bfb164689d23fb8f1549 | /zh_spider/spiders/example.py | 60a25b86eb028eccfa6f54a893c9e692b7fe3b1e | [
"Apache-2.0"
] | permissive | szqh97/zspider | 74bde989cb3b83384bbe6b4abaeafaac90f02946 | 204bf44fb6de74f122a8678c8141d2743ef9706e | refs/heads/master | 2021-01-10T03:15:54.088548 | 2016-01-05T06:27:27 | 2016-01-05T06:27:27 | 48,946,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | # -*- coding: utf-8 -*-
"""
TODO
"""
import scrapy
from scrapy.http.request import Request
class example(scrapy.Spider):
"""
This is a zhihu user crawler spider
"""
name = "example"
    allowed_domains = ["zhihu.com"]  # scrapy expects bare domain names here, not URLs
start_urls = (
'https://www.zhihu.com/people/excited-vczh',
#'https://www.zhihu.com/people/excited-vczh/followees',
)
cookie = '_za=345605f6-eefa-4af6-a945-f2b790d12061; _ga=GA1.2.322499337.1436092356; q_c1=4b1955e3c8aa45ba930b1f40abe7c4ca|1449306839000|1436092370000; z_c0="QUFEQTVjMGVBQUFYQUFBQVlRSlZUVGl2clZieTl6NmN1Z19JX0oxczJubWh0QmIyRGoxRjRnPT0=|1451631160|c67789a061fac4d814e558bf5e0964e398efa6e4"; __utma=51854390.322499337.1436092356.1451631139.1451637264.14; __utmz=51854390.1451637264.14.8.utmcsr=zhihu.com|utmccn=(referral)|utmcmd=referral|utmcct=/people/excited-vczh/followees; __utmv=51854390.100-1|2=registration_date=20131007=1^3=entry_date=20131007=1; _xsrf=3130d2c61e1c6d68615d5046fa9d1714; cap_id="YThhNjgyNmUxYzYxNDJkM2JmNjk1MzU5OGVhNzA5NjE=|1451631135|c1a47afe58d0fdbcba7f0f524ca913809b709b79"; __utmc=51854390',
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Cookie': cookie,
'Host': 'www.zhihu.com',
'Referer': 'https://www.zhihu.com/people/szqh97/followees',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:40.0) Gecko/20100101 Firefox/40.0'
}
def start_requests(self):
for url in self.start_urls:
yield Request(url, headers=self.headers, callback = self.parse_first_request)
def parse(self, response):
print response.body
pass
def parse_first_request(self, response):
print response.body
self._xsrf = response.xpath('//input[@name="_xsrf"]').xpath("@value").extract()[0]
        print '_xsrf token:', self._xsrf
| [
"[email protected]"
] | |
debf561cc18693e3e094f31b1cb9df8a948b1a11 | 163fe2b466f3f2b5f3f9f218f3252f2cf78afa65 | /begoodPlus/pages/context_processors.py | 019c457fb3419b411ba317f3d83ac69ffcfaa6bf | [] | no_license | nhrnhr0/BeGoodPlus3 | ec711952efcb267f4a9e72b0fb6e370ef5fea04c | 8acead88ccba6d7427b945a7eb98562922d39cd9 | refs/heads/master | 2023-07-30T15:03:41.524737 | 2021-09-26T10:16:04 | 2021-09-26T10:16:04 | 349,965,663 | 0 | 0 | null | 2021-06-15T06:32:39 | 2021-03-21T10:34:15 | JavaScript | UTF-8 | Python | false | false | 224 | py |
from catalogAlbum.models import CatalogAlbum
from django.conf import settings
def navbar_load(request):
albums = CatalogAlbum.objects.all()
return {'albums': albums,
'domain': settings.MY_DOMAIN,
} | [
"[email protected]"
] | |
df0718ed19926840558ee2e12230c01ad56baf3d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_repents.py | 93675035cf55791d81d47ce5ead5081fe5e2a5b8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
# class header
class _REPENTS():
    def __init__(self,):
        self.name = "REPENTS"
        self.definitions = ['repent']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['repent']
| [
"[email protected]"
] | |
f7f24236f8bf9fb9ba726c09b19bf01307014a4d | 6450234cc5339e9d05102b25b25ba38e2bd9e4cb | /METAnalysis/python/multiplicityReweightedFits.py | c95d50e5b16041d44fef98f33910734426ce69e3 | [] | no_license | wa01/Workspace | 57b87481005c441ab91a8180ddf6ea00b520aca7 | 47759c6a20473f7a694ca9e3fd4e0e8343c8018c | refs/heads/master | 2021-01-15T10:36:55.429420 | 2014-09-20T17:44:54 | 2014-09-20T17:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,944 | py | import ROOT
import pickle
from commons import label
from Workspace.HEPHYPythonTools.helpers import getVarValue
from math import pi, cos, sin, sqrt, atan2
ROOT.gROOT.ProcessLine(".L ../../HEPHYPythonTools/scripts/root/tdrstyle.C")
ROOT.setTDRStyle()
from commons import *
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--sample", dest="sample", default="dy53X", type="string", action="store", help="samples:Which samples.")
parser.add_option("--prefix", dest="prefix", default="", type="string", action="store", help="prefix:Which prefix.")
parser.add_option("--maps", dest="maps", default='all', type="string", action="store", help="samples:Which maps.")
parser.add_option("--small", dest="small", action="store_true", help="Just do a small subset.")
parser.add_option("--useMapWeight", dest="useMapWeight", action="store_true", help="use the stored map weight")
(options, args) = parser.parse_args()
prefixes=[]
if options.prefix!='':
prefixes.append(options.prefix)
if options.small:
prefixes.append('small')
if options.useMapWeight:
prefixes.append('useMapWeight')
prefix = '_'.join(prefixes)
if prefix!='':
prefix+='_'
print "options: sample",options.sample, 'maps', options.maps, 'small',options.small,'useMapWeight',options.useMapWeight,'prefix',options.prefix
if options.maps=='all':
maps = allMaps
else:
exec("maps = [" +options.maps+ "]")
c = ROOT.TChain('Events')
if options.sample == 'dy53X':
#sample = 'MinimumBias-Run2012A-22Jan2013'
if options.small:
# c.Add('/data/schoef/convertedMETTuples_v2/inc/'+options.sample+'/histo_'+options.sample+'*_from0To1.root')
c.Add('/data/schoef/convertedMETTuples_v2/inc/dy53X_dy53X_rwTo_flat/histo_dy53X_from10To11.root')
else:
# c.Add('/data/schoef/convertedMETTuples_v2/inc/'+options.sample+'/histo_'+options.sample+'*.root')
c.Add('/data/schoef/convertedMETTuples_v2/inc/dy53X_dy53X_rwTo_flat/histo_dy53X_from*')
if options.sample.lower().count('doublemu') or options.sample.lower().count('minimumbias') or options.sample.lower()=='ttjets':
if options.small:
c.Add('/data/schoef/convertedMETTuples_v2/inc/'+options.sample+'/histo_'+options.sample+'*_0.root')
c.Add('/data/schoef/convertedMETTuples_v2/inc/'+options.sample+'/histo_'+options.sample+'*_1.root')
c.Add('/data/schoef/convertedMETTuples_v2/inc/'+options.sample+'/histo_'+options.sample+'*_2.root')
c.Add('/data/schoef/convertedMETTuples_v2/inc/'+options.sample+'/histo_'+options.sample+'*_3.root')
c.Add('/data/schoef/convertedMETTuples_v2/inc/'+options.sample+'/histo_'+options.sample+'*_4.root')
c.Add('/data/schoef/convertedMETTuples_v2/inc/'+options.sample+'/histo_'+options.sample+'*_5.root')
else:
c.Add('/data/schoef/convertedMETTuples_v2/inc/'+options.sample+'/histo_'+options.sample+'*.root')
h['fitRange'] = [0,1200]
h0Barrel['fitRange'] = [0,30]
h0EndcapPlus['fitRange'] = [0,30]
h0EndcapMinus['fitRange'] = [0,30]
gammaBarrel['fitRange'] = [0,500]
gammaEndcapPlus['fitRange'] = [0,150]
gammaEndcapMinus['fitRange'] = [0,150]
gammaForwardPlus['fitRange'] = [0,10]
gammaForwardMinus['fitRange'] = [0,10]
e['fitRange'] = [0,10]
mu['fitRange'] = [0,10]
h_HF_Minus['fitRange'] = [10,250]
h_HF_Plus['fitRange'] = [10,250]
h_HF_InnerMostRingsMinus['fitRange'] = [0,50]
h_HF_InnerMostRingsPlus['fitRange'] = [0,50]
egamma_HF_Minus['fitRange'] = [0,250]
egamma_HF_Plus['fitRange'] = [0,250]
egamma_HF_InnerMostRingsMinus['fitRange'] = [0,50]
egamma_HF_InnerMostRingsPlus['fitRange'] = [0,50]
h_HF['fitRange'] = [0,500]
egamma_HF['fitRange'] = [0,500]
h['zoomRange'] = [-20,20]
h0Barrel['zoomRange'] = [-20,20]
h0EndcapPlus['zoomRange'] = [-20,20]
h0EndcapMinus['zoomRange'] = [-20,20]
gammaBarrel['zoomRange'] = [-20,20]
gammaEndcapPlus['zoomRange'] = [-20,20]
gammaEndcapMinus['zoomRange'] = [-20,20]
gammaForwardPlus['zoomRange'] = [-20,20]
gammaForwardMinus['zoomRange'] = [-20,20]
e['zoomRange'] = [-20,20]
mu['zoomRange'] = [-20,20]
h_HF_Minus['zoomRange'] = [-5,5]
h_HF_Plus['zoomRange'] = [-5,5]
h_HF_InnerMostRingsMinus['zoomRange'] = [-5,5]
h_HF_InnerMostRingsPlus['zoomRange'] = [-5,5]
egamma_HF_Minus['zoomRange'] = [-5,5]
egamma_HF_Plus['zoomRange'] = [-5,5]
egamma_HF_InnerMostRingsMinus['zoomRange'] = [-5,5]
egamma_HF_InnerMostRingsPlus['zoomRange'] = [-5,5]
h_HF['zoomRange'] = [-5,5]
egamma_HF['zoomRange'] = [-5,5]
makeN2Fit = [m['name'] for m in [ h_HF, h_HF_Minus, h_HF_Plus, h_HF_InnerMostRingsMinus, h_HF_InnerMostRingsPlus]]
makeN2Fit += [m['name'] for m in [ egamma_HF, egamma_HF_Minus, egamma_HF_Plus, egamma_HF_InnerMostRingsMinus, egamma_HF_InnerMostRingsPlus]]
#for map in [h_HF, egamma_HF]:
for map in maps:
#for map in [ h_HF, egamma_HF, h_HF_Minus, h_HF_Plus, h_HF_InnerMostRingsMinus, h_HF_InnerMostRingsPlus]:
candSelCut = 'candId=='+str(label[map['type']])+'&&candEta>'+str(map['binning'][1])+'&&candEta<='+str(map['binning'][2])
print 'sample',options.sample,"candSelCut", candSelCut
if options.useMapWeight:
weightString = '*candW'
else:
weightString = ''
px=ROOT.TProfile("p_MEx_"+map['name'],"p_MEx"+map['name'],*(map['candBinning']+[-200,200,'i']))
py=ROOT.TProfile("p_MEy_"+map['name'],"p_MEy"+map['name'],*(map['candBinning']+[-200,200,'i']))
if map['name'] in makeN2Fit:
fx = ROOT.TF1('fx', '[0]*x**2+[1]*x',*(map['fitRange']))
else:
fx = ROOT.TF1('fx', '[0]*x',*(map['fitRange']))
c.Draw('Sum$(-('+candSelCut+')*candPt*cos(candPhi)'+weightString+'):Sum$('+candSelCut+')>>p_MEx_'+map['name'],'','goff')
px.Fit(fx, 'R')
param_x = '('+str(fx.GetParameter(0))+')*Sum$('+candSelCut+')'
if map['name'] in makeN2Fit:
param_x +="**2"
param_x += '+('+str(fx.GetParameter(1))+')*Sum$('+candSelCut+')'
if map['name'] in makeN2Fit:
fy = ROOT.TF1('fy', '[0]*x**2+[1]*x',*(map['fitRange']))
else:
fy = ROOT.TF1('fy', '[0]*x',*(map['fitRange']))
c.Draw('Sum$(-('+candSelCut+')*candPt*sin(candPhi)'+weightString+'):Sum$('+candSelCut+')>>p_MEy_'+map['name'],'','goff')
py.Fit(fy,'R')
param_y = '('+str(fy.GetParameter(0))+')*Sum$('+candSelCut+')'
if map['name'] in makeN2Fit:
param_y +="**2"
param_y += '+('+str(fy.GetParameter(1))+')*Sum$('+candSelCut+')'
result = {'fx':fx.Clone(),'fy':fy.Clone(), 'candCount':'Sum$('+candSelCut+')', 'MEx':'Sum$(-('+candSelCut+')*candPt*cos(candPhi))', 'MEy':'Sum$(-('+candSelCut+')*candPt*sin(candPhi))', 'param_x':param_x, 'param_y':param_y}
if not options.small and not options.useMapWeight:
pickle.dump(result, file('/data/schoef/tools/metPhiShifts/shift_'+prefix+options.sample+'_'+map['name']+'.pkl', 'w'))
c1 = ROOT.TCanvas()
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetOptFit(0)
px.Draw('h')
px.GetXaxis().SetTitle("multiplicity in "+map['name'])
px.GetYaxis().SetTitle("<#slash{E}_{x,y}> (GeV)")
# px.GetXaxis().SetLabelSize(0.04)
px.GetXaxis().SetTitleSize(0.05)
px.GetXaxis().SetTitleOffset(1.1)
px.GetYaxis().SetRangeUser(*(map['zoomRange']))
px.SetLineColor(ROOT.kBlue)
px.SetLineStyle(0)
px.SetLineWidth(2)
px.SetMarkerStyle(0)
px.SetMarkerSize(0)
# py.GetYaxis().SetRangeUser(-20,20)
py.SetLineColor(ROOT.kRed)
py.SetLineStyle(0)
py.SetLineWidth(2)
py.SetMarkerStyle(0)
py.SetMarkerSize(0)
py.Draw('hsame')
if map['name'] in makeN2Fit:
lines = [ [0.18, 0.78, "<#slash{E}_{x}> = 10^{-6}#upoint ("+str(round(10**6*fx.GetParameter(0),1))+'#pm '+str(round(10**6*abs(fx.GetParError(0)),1))+") #upoint n^{2}+10^{-3}#upoint("+str(round(10**3*fx.GetParameter(1),1))+'#pm '+str(round(10**3*abs(fx.GetParError(1)),1))+") #upoint n"],
[0.18, 0.73, "<#slash{E}_{y}> = 10^{-6}#upoint ("+str(round(10**6*fy.GetParameter(0),1))+'#pm '+str(round(10**6*abs(fy.GetParError(0)),1))+") #upoint n^{2}+10^{-3}#upoint("+str(round(10**3*fy.GetParameter(1),1))+'#pm '+str(round(10**3*abs(fy.GetParError(1)),1))+") #upoint n"]]
else:
lines = [ [0.4, 0.78, "<#slash{E}_{x}> = 10^{-3} #upoint ("+str(round(10**3*fx.GetParameter(0),1))+'#pm '+str(round(10**3*abs(fx.GetParError(0)),1))+") #upoint n"],
[0.4, 0.73, "<#slash{E}_{y}> = 10^{-3} #upoint ("+str(round(10**3*fy.GetParameter(0),1))+'#pm '+str(round(10**3*abs(fy.GetParError(0)),1))+") #upoint n"]]
latex = ROOT.TLatex();
latex.SetNDC();
latex.SetTextSize(0.04);
latex.SetTextAlign(11); # align right
for line in lines:
latex.SetTextSize(0.04)
latex.DrawLatex(line[0],line[1],line[2])
l = ROOT.TLegend(0.55,0.83,.95,.95)
l.AddEntry(px, "< #slash{E}_{x} >")#+etab[0].split('_')[0]+", "+shortName[etab[0].split('_')[1]])
l.AddEntry(py, "< #slash{E}_{y} >")
l.SetFillColor(0)
l.SetShadowColor(ROOT.kWhite)
l.SetBorderSize(1)
l.Draw()
c1.Print('/afs/hephy.at/user/s/schoefbeck/www/pngPF/'+prefix+options.sample+'_candidateBased_MExy_'+map['name']+'.png')
c1.Print('/afs/hephy.at/user/s/schoefbeck/www/pngPF/'+prefix+options.sample+'_candidateBased_MExy_'+map['name']+'.root')
del px, py, l, c1
| [
"[email protected]"
] | |
cfbb894dced5120522fc3ed37442e6412c55cbb1 | 2a1ce1846dc4430f22c0f07c1d52ce8f0affde62 | /mobi/wsgi.py | aa06d265a04f585da3cdf7e33a778c5f4de0f144 | [] | no_license | Belie06Loryn/MobiCashChallenge | e8708cf475b39722f35540a850eac1adc20cfaac | 58144494160e3dada66374131b317a11a0d5b0e1 | refs/heads/master | 2022-11-27T16:50:53.613486 | 2020-05-14T08:05:14 | 2020-05-14T08:05:14 | 236,824,658 | 0 | 0 | null | 2022-11-22T04:57:48 | 2020-01-28T19:41:51 | Python | UTF-8 | Python | false | false | 386 | py | """
WSGI config for mobi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mobi.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
f597058f292cdfbdc5b9f20f0197c632bca897be | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_mesksr_Counting Sheep.py | c3235f1be7a672b4553acacad7c7fb910a20c4d0 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 412 | py | for t in xrange(int(raw_input())):
    print "Case #"+str(t+1)+":",
    n = int(raw_input())
    if (n==0):
        # multiples of 0 never reveal any new digit
        print 'INSOMNIA'
        continue
    c = [False]*10  # which of the digits 0-9 have been seen so far
    i = 1
    while(i):
        temp = n*i
        #print temp,
        while(temp>0):
            # mark every digit of the current multiple n*i
            c[temp%10]=True
            temp = temp/10
        flag = 1
        for j in range(10):
            if (c[j]==False):
                #print n*i, c
                i+=1  # some digit is still missing: try the next multiple
                flag = 0
                break
        if (flag==1):
            # all ten digits have been seen
            break
    print i*n  # the last number counted before falling asleep
| [
"[[email protected]]"
] | |
c8dd804c964155bd1df00e45fd3bfe5049e4ead0 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/ESPCN_ID2919_for_PyTorch/utils.py | a0397e06689a1d6780d3782191eb3493be04b221 | [
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,742 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import torch
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
class AverageMeter(object):
""" Function to Compute and store the average and current value
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def calculate_psnr(img1, img2):
""" Function to calculate the Peak Signal to Noise Ratio (PSNR)
:param img1: model output Isr image
:param img2: ground truth Ihr image
:return: Peak Signal to Noise Ratio between two images
"""
    return 10. * torch.log10(1. / torch.mean((img1 - img2) ** 2))
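# Rough usage sketch (an illustration, not part of the training pipeline):
# tracks a running PSNR average over made-up [0, 1] image tensors. Assumes a
# working torch install; the shapes and the 0.05 noise level are arbitrary.
if __name__ == '__main__':
    meter = AverageMeter()
    for _ in range(4):
        hr = torch.rand(1, 1, 32, 32)                               # fake ground truth
        sr = (hr + 0.05 * torch.randn(1, 1, 32, 32)).clamp(0., 1.)  # fake model output
        meter.update(calculate_psnr(sr, hr).item(), n=1)
    print('running PSNR average: %.2f dB' % meter.avg) | [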
"[email protected]"
] | |
b4f1975b22f360941d3c806c66a92cd130121e85 | 6f4e925bf4538d104f1e3e9754d4297c5504ab80 | /python/recall/app/core/filelib.py | 4e63aad8618229b0a6bd6f71d4079d4b5bfecad2 | [
"MIT"
] | permissive | kingreatwill/openself | 7f02282da3e0b1f328c3627d83ba2b5ed4563dc8 | 8517d24e665b39371835ecd2ed0cd3509a5d9d62 | refs/heads/master | 2023-01-23T13:15:49.491402 | 2020-11-19T02:39:52 | 2020-11-19T02:39:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | import hashlib
import magic
# pip install python-magic-bin
# pip install python-magic
def __sha1(f):
sha1obj = hashlib.sha1()
sha1obj.update(f.read())
return sha1obj.hexdigest()
def __md5(f):
md5obj = hashlib.md5()
md5obj.update(f.read())
return md5obj.hexdigest()
# the file argument must be opened in binary mode ('rb')
def sha1(file):
if isinstance(file, str):
with open(file, 'rb') as f:
return __sha1(f)
result = __sha1(file)
file.seek(0)
return result
# the file argument must be opened in binary mode ('rb')
def md5(file):
if isinstance(file, str):
with open(file, 'rb') as f:
return __md5(f)
result = __md5(file)
file.seek(0)
return result
f = magic.Magic(mime=True, uncompress=True)
def mime(file):
if isinstance(file, str):
return f.from_file(file)
buff = file.read(2048)
file.seek(0)
return f.from_buffer(buff)
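# Rough usage sketch (assumes some readable file exists at 'example.bin'):
#   print(md5('example.bin'))               # hex digest straight from a path
#   with open('example.bin', 'rb') as fh:   # handles must be binary ('rb')
#       print(sha1(fh), mime(fh))           # helpers rewind fh when done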
| [
"[email protected]"
] | |
20377d1afff2b99076cac037dae5436eae9bc44d | 6a609bc67d6a271c1bd26885ce90b3332995143c | /exercises/binary-tree/lowest_common_ancester_of_a_binary_tree.py | a97468a75332c15acab7e46d66420ce78c3968f5 | [] | no_license | nahgnaw/data-structure | 1c38b3f7e4953462c5c46310b53912a6e3bced9b | 18ed31a3edf20a3e5a0b7a0b56acca5b98939693 | refs/heads/master | 2020-04-05T18:33:46.321909 | 2016-07-29T21:14:12 | 2016-07-29T21:14:12 | 44,650,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,239 | py | # -*- coding: utf-8 -*-
"""
Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
According to the definition of LCA on Wikipedia: “The lowest common ancestor is defined between two nodes v and w as the lowest node in T that has both v and w as descendants (where we allow a node to be a descendant of itself).”
_______3______
/ \
___5__ ___1__
/ \ / \
6 _2 0 8
/ \
7 4
For example, the lowest common ancestor (LCA) of nodes 5 and 1 is 3. Another example is LCA of nodes 5 and 4 is 5, since a node can be a descendant of itself according to the LCA definition.
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if root in [None, p, q]: return root
left, right = (self.lowestCommonAncestor(child, p, q) for child in (root.left, root.right))
return root if left and right else left or right
def lowestCommonAncestor2(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if root is None:
return None
# Find out all the parent-child pair above (including) p and q.
stack = [root]
parents = {root: None}
while p not in parents or q not in parents:
node = stack.pop()
if node.left is not None:
stack.append(node.left)
parents[node.left] = node
if node.right is not None:
stack.append(node.right)
parents[node.right] = node
# Get all the ancestors of p.
ancestors = set()
while p:
ancestors.add(p)
p = parents[p]
# Match q's ancestors.
while q not in ancestors:
q = parents[q]
return q
| [
"[email protected]"
] | |
098519dba7eb3d90f4913982295bb307463d8fe9 | e47d5da2a947c3b3a834817d0b084ee65d302067 | /atcoder.jp/abc150/abc150_a/Main.py | a485c4008d479ffea1699367b473b4ccc6a9ffa0 | [] | no_license | aki-nlp/AtCoder | 3293b9b183c0a8cefbf20d7f4f491c6f1e7604b8 | 9385805cbb1fa158f6d3c4a2415cdf7ba94547e5 | refs/heads/master | 2023-02-25T06:04:10.913237 | 2020-10-03T12:02:00 | 2020-10-03T12:02:00 | 296,792,313 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | k, x = map(int, input().split())
if 500*k >= x:
print('Yes')
else:
print('No') | [
"[email protected]"
] | |
5dcc87d0ce772cdb6f82fc8e7f36c35aeffb7ffe | 062376f05b517606c0257d4855d8d93d482137c1 | /tests/c/test_rand.py | 0a2c3c11f7ed4dde5c6e0710061033828e451c38 | [
"Apache-2.0"
] | permissive | exarkun/opentls | 7047a68ce48c7d9c7aa67458693f7406a4ce60a9 | cc651c597164b26a3dab2b9dd90dd375554ea74c | refs/heads/master | 2021-01-15T21:09:49.086756 | 2013-05-28T12:14:11 | 2013-05-28T12:14:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,811 | py | """Test RAND API
The objective is to test the API wrapper, not the underlying random number
generators. The tests implemented were derived from John D Cook's chapter
in 'Beautiful Testing' titled 'How to test a random number generator'.
http://www.johndcook.com/blog/2010/12/06/how-to-test-a-random-number-generator-2/
"""
from __future__ import absolute_import, division, print_function
import ctypes
import math
import random
try:
import unittest2 as unittest
except ImportError:
import unittest
from tls.c import api
def cumulative_average(average=0.0, samples=0.0):
"""Generator to keep track of the current cummulative moving average.
To use:
>>> average = cumulative_average()
    >>> average.send(None)
    0.0
    >>> for value in [1, 2, 3, 4]:
    ...     mean = average.send(value)
    ...     print(mean)
    1.0
    1.5
    2.0
    2.5
The function arguments `average` and `samples` can be used to set the
cumulative average's initial state.
http://en.wikipedia.org/wiki/Moving_average#Cumulative_moving_average
"""
cma = average
cnt = samples
while True:
new = yield cma
cnt += 1.0
cma = cma + (new - cma) / cnt
class RandTests(object):
def test_range(self):
"""Test extremes of valid range for random values has been generated.
The probability of failure is less than 0.005e-17 for 10000 samples.
"""
low = min(self.data)
high = max(self.data)
self.assertEqual(high, 255)
self.assertEqual(low, 0)
def test_median(self):
"""Test that the median is "close" to the expected mean."""
sorted_ = sorted(self.data)
median = sorted_[self.samples // 2]
self.assertAlmostEqual(median, 127.5, delta=5.0)
def test_mean(self):
"""Test the actual mean is "close" to the expected mean."""
average = cumulative_average()
average.send(None)
for value in self.data:
mean = average.send(value)
self.assertAlmostEqual(mean, 127.5, delta=3.0)
def test_variance(self):
"""Test the variance is "close" to the expected mean."""
expected_mean = 255 // 2
average = cumulative_average()
average.send(None)
for value in self.data:
deviation_squared = (value - expected_mean) ** 2
variance = average.send(deviation_squared)
expected_variance = (expected_mean // 2) ** 2
self.assertAlmostEqual(variance, expected_variance, delta=expected_variance // 2)
def test_buckets(self):
"""Test the distribution of values across the range."""
counts = {}
for value in self.data:
counts[value] = 1 + counts.get(value, 0)
for value, count in counts.items():
self.assertGreater(count, 0)
self.assertLess(count, 2.0 * (self.samples / 255.0))
def test_kolmogorov_smirnov(self):
"""Apply the Kolmogorov-Smirnov goodness-of-fit function.
Range values for K+ sourced from 'Beautiful Testing'
"""
samples = 1e3
counts = {}
for num, value in enumerate(self.data):
if num >= samples:
break
for x in range(value + 1):
counts[x] = 1 + counts.get(x, 0)
empirical = [counts.get(i,0) / samples for i in range(256)]
theoretical = [1.0 - (x / 255.0) for x in range(256)]
kplus = math.sqrt(samples) * max(empirical[i] - theoretical[i] for i in range(256))
self.assertGreaterEqual(kplus, 0.07089)
self.assertLessEqual(kplus, 1.5174)
#kminus = math.sqrt(samples) * max(theoretical[i] - empirical[i] for i in range(256))
#self.assertGreaterEqual(kminus, 0.07089)
#self.assertLessEqual(kminus, 1.5174)
class TestPRNG(unittest.TestCase, RandTests):
"""Test OpenSSL's pseudo random number generator"""
samples = int(1e4)
data = api.new('unsigned char[]', samples)
@classmethod
def setUpClass(cls):
if not api.RAND_status():
api.RAND_load_file(b"/dev/urandom", 1024)
api.RAND_pseudo_bytes(api.cast('unsigned char*', cls.data), cls.samples)
def setUp(self):
self.assertTrue(api.RAND_status())
class TestCryptoRNG(unittest.TestCase, RandTests):
"""Test OpenSSL's crytographically valid random data"""
samples = int(1e4)
data = api.new('unsigned char[]', samples)
@classmethod
def setUpClass(cls):
api.RAND_bytes(api.cast('unsigned char*', cls.data), cls.samples)
class TestPyRandom(unittest.TestCase, RandTests):
"""Test Python's Mersenne Twister implementation"""
samples = int(1e4)
@classmethod
def setUpClass(cls):
cls.data = [random.randint(0, 255) for i in range(cls.samples)]
| [
"[email protected]"
] | |
5714f1085c99c3174fc30c88db97f22834805597 | 1ab52d160b72f4b5bc6776175c1a8aaf3cb86e0a | /fb-posts/01_python/22_format_number.py | a0ce2f73453c4f3fa97910f235202a4bd785d4fa | [] | no_license | krakowiakpawel9/live-python | ea3598ceeafca827b53efdf71a6d3156a6fa25b4 | 237b186b48b51f58450290f5ed0146041e0a1135 | refs/heads/master | 2021-04-17T06:10:53.141136 | 2021-03-06T09:53:47 | 2021-03-06T09:53:47 | 249,418,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | import math
print(f'Pi: {math.pi}')
print(f'Pi: {math.pi:.2f}')
print(f'Pi: {math.pi:.4f}')
| [
"[email protected]"
] | |
3a598385ff90c8c29086dd7913ffb91721b0b6bd | ea6a9bcf02fe7d72df645302909b1de63cdf7fe0 | /test/functional/wallet_listreceivedby.py | ed70700f50b83a960e284a36d7294e4bcac3a672 | [
"MIT"
] | permissive | durgeshkmr/minicoin | 69a834786413122eb2b85731b20f0fda931c7a72 | 4f082abe13cd34a759bf8ffb344a49244615960e | refs/heads/master | 2020-05-04T22:59:23.367524 | 2019-04-06T16:13:28 | 2019-04-06T16:13:28 | 179,529,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,242 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listreceivedbyaddress RPC."""
from decimal import Decimal
from test_framework.test_framework import MinicoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_raises_rpc_error,
sync_blocks,
)
class ReceivedByTest(MinicoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def import_deterministic_coinbase_privkeys(self):
assert_equal(0, len(self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True)))
super().import_deterministic_coinbase_privkeys()
self.num_cb_reward_addresses = len(self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True))
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Generate block to get out of IBD
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.log.info("listreceivedbyaddress Test")
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# Check not listed in listreceivedbyaddress because has 0 confirmations
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address": addr},
{},
True)
        # Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address": addr},
{"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
# With min confidence < 10
assert_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address": addr},
{"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
# With min confidence > 10, should not find Tx
assert_array_result(self.nodes[1].listreceivedbyaddress(11), {"address": addr}, {}, True)
# Empty Tx
empty_addr = self.nodes[1].getnewaddress()
assert_array_result(self.nodes[1].listreceivedbyaddress(0, True),
{"address": empty_addr},
{"address": empty_addr, "label": "", "amount": 0, "confirmations": 0, "txids": []})
# Test Address filtering
# Only on addr
expected = {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}
res = self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True, address_filter=addr)
assert_array_result(res, {"address": addr}, expected)
assert_equal(len(res), 1)
# Test for regression on CLI calls with address string (#14173)
cli_res = self.nodes[1].cli.listreceivedbyaddress(0, True, True, addr)
assert_array_result(cli_res, {"address": addr}, expected)
assert_equal(len(cli_res), 1)
# Error on invalid address
assert_raises_rpc_error(-4, "address_filter parameter was invalid", self.nodes[1].listreceivedbyaddress, minconf=0, include_empty=True, include_watchonly=True, address_filter="bamboozling")
        # Another address receives money
res = self.nodes[1].listreceivedbyaddress(0, True, True)
assert_equal(len(res), 2 + self.num_cb_reward_addresses) # Right now 2 entries
other_addr = self.nodes[1].getnewaddress()
txid2 = self.nodes[0].sendtoaddress(other_addr, 0.1)
self.nodes[0].generate(1)
self.sync_all()
# Same test as above should still pass
expected = {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 11, "txids": [txid, ]}
res = self.nodes[1].listreceivedbyaddress(0, True, True, addr)
assert_array_result(res, {"address": addr}, expected)
assert_equal(len(res), 1)
# Same test as above but with other_addr should still pass
expected = {"address": other_addr, "label": "", "amount": Decimal("0.1"), "confirmations": 1, "txids": [txid2, ]}
res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
assert_array_result(res, {"address": other_addr}, expected)
assert_equal(len(res), 1)
# Should be two entries though without filter
res = self.nodes[1].listreceivedbyaddress(0, True, True)
assert_equal(len(res), 3 + self.num_cb_reward_addresses) # Became 3 entries
# Not on random addr
other_addr = self.nodes[0].getnewaddress() # note on node[0]! just a random addr
res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
assert_equal(len(res), 0)
self.log.info("getreceivedbyaddress Test")
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.0"))
# Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr, 0)
assert_equal(balance, Decimal("0.1"))
        # Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.1"))
# Trying to getreceivedby for an address the wallet doesn't own should return an error
assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr)
self.log.info("listreceivedbylabel + getreceivedbylabel Test")
# set pre-state
label = ''
address = self.nodes[1].getnewaddress()
assert_equal(self.nodes[1].getaddressinfo(address)['label'], label)
received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel() if r["label"] == label][0]
balance_by_label = self.nodes[1].getreceivedbylabel(label)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbylabel should return received_by_label_json because of 0 confirmations
assert_array_result(self.nodes[1].listreceivedbylabel(),
{"label": label},
received_by_label_json)
# getreceivedbyaddress should return same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbylabel(label)
assert_equal(balance, balance_by_label)
self.nodes[1].generate(10)
self.sync_all()
# listreceivedbylabel should return updated received list
assert_array_result(self.nodes[1].listreceivedbylabel(),
{"label": label},
{"label": received_by_label_json["label"], "amount": (received_by_label_json["amount"] + Decimal("0.1"))})
# getreceivedbylabel should return updated receive total
balance = self.nodes[1].getreceivedbylabel(label)
assert_equal(balance, balance_by_label + Decimal("0.1"))
# Create a new label named "mynewlabel" that has a 0 balance
address = self.nodes[1].getnewaddress()
self.nodes[1].setlabel(address, "mynewlabel")
received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel(0, True) if r["label"] == "mynewlabel"][0]
# Test includeempty of listreceivedbylabel
assert_equal(received_by_label_json["amount"], Decimal("0.0"))
# Test getreceivedbylabel for 0 amount labels
balance = self.nodes[1].getreceivedbylabel("mynewlabel")
assert_equal(balance, Decimal("0.0"))
if __name__ == '__main__':
ReceivedByTest().main()
| [
"[email protected]"
] | |
52bbc6eb71c3b869b7e93c49b39012fbc7bcf6d7 | 4dc31dd8bfd10945f7a5fee31f30c2e634b0534e | /clark_wright.py | 6687d2af2ef4aac6506b4f9eb43d76d106c12b74 | [] | no_license | cfld/clark_wright | 9b89201e1bde33ca9a89cedaaa45bf123578d3ec | c67019c7fa72d3218c1d598968c3c5983277ffa7 | refs/heads/master | 2022-11-05T05:42:48.484236 | 2020-06-16T03:33:48 | 2020-06-16T03:33:48 | 245,242,814 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,452 | py |
class CW:
def __init__(self, edges, D_depot, demand, cap):
self.edges = edges
self.D_depot = D_depot
self.demand = demand
self.cap = cap
self.visited = set([])
self.boundary = set([])
self.routes = {}
self.node2route = {}
self.route_idx = 0
def _new_route(self, src, dst):
load = self.demand[src] + self.demand[dst]
cost = self.D_depot[src] + self.edges[(src, dst)] + self.D_depot[dst]
if load > self.cap:
return
self.visited.add(src)
self.visited.add(dst)
self.boundary.add(src)
self.boundary.add(dst)
self.node2route[src] = self.route_idx
self.node2route[dst] = self.route_idx
self.routes[self.route_idx] = {
"idx" : self.route_idx,
"nodes" : [src, dst],
"load" : load,
"cost" : cost,
}
self.route_idx += 1
def _extend_route(self, a, b):
r = self.routes[self.node2route[a]]
new_load = r['load'] + self.demand[b]
new_cost = r['cost'] + self.edges[(a, b)] + self.D_depot[b] - self.D_depot[a]
if new_load > self.cap:
return
self.visited.add(b)
self.boundary.remove(a)
self.boundary.add(b)
if r['nodes'][0] == a:
r['nodes'].insert(0, b)
elif r['nodes'][-1] == a:
r['nodes'].append(b)
else:
raise Exception('not in right position')
r['load'] = new_load
r['cost'] = new_cost
self.node2route[b] = r['idx']
def _merge_route(self, src, dst):
r_src = self.routes[self.node2route[src]]
r_dst = self.routes[self.node2route[dst]]
new_load = r_src['load'] + r_dst['load']
new_cost = r_src['cost'] + r_dst['cost'] + self.edges[(src, dst)] - self.D_depot[src] - self.D_depot[dst]
if new_load > self.cap:
return
self.boundary.remove(src)
self.boundary.remove(dst)
# reverse direction to fit
if r_src['nodes'][-1] != src:
r_src['nodes'] = r_src['nodes'][::-1]
if r_dst['nodes'][0] != dst:
r_dst['nodes'] = r_dst['nodes'][::-1]
del self.routes[self.node2route[src]]
del self.routes[self.node2route[dst]]
r = {
"idx" : self.route_idx,
"nodes" : r_src['nodes'] + r_dst['nodes'],
"load" : new_load,
"cost" : new_cost,
}
for n in r['nodes']:
self.node2route[n] = self.route_idx
self.routes[self.route_idx] = r
self.route_idx += 1
def _fix_unvisited(self):
# fix customers that haven't been visited
for n in range(self.demand.shape[0]):
if n not in self.visited:
                self.routes[self.route_idx] = {
"idx" : self.route_idx,
"nodes" : [n],
"load" : self.demand[n],
"cost" : 2 * self.D_depot[n],
}
self.route_idx += 1
def run(self):
for (src, dst) in self.edges.keys():
src_visited = src in self.visited
dst_visited = dst in self.visited
src_boundary = src in self.boundary
dst_boundary = dst in self.boundary
if src > dst:
pass
            if src_visited and not src_boundary:
                pass  # src is interior to a route; nothing can attach to it
            elif dst_visited and not dst_boundary:
                pass  # dst is interior to a route; nothing can attach to it
elif not src_visited and not dst_visited:
self._new_route(src, dst)
elif src_boundary and not dst_visited:
self._extend_route(src, dst)
elif dst_boundary and not src_visited:
self._extend_route(dst, src)
elif src_boundary and dst_boundary and (self.node2route[src] != self.node2route[dst]):
self._merge_route(src, dst)
else:
pass
self._fix_unvisited()
return self.routes
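# Minimal usage sketch with made-up numbers. One assumption worth stressing:
# run() consumes `edges` in iteration order, so the dict must already be
# sorted by decreasing Clarke-Wright savings s(i, j) = d(0, i) + d(0, j) - d(i, j).
if __name__ == '__main__':
    import numpy as np
    D_depot = np.array([4.0, 4.0, 5.0])   # depot-to-customer distances
    demand = np.array([3, 4, 5])          # one entry per customer
    edges = {(0, 1): 2.0, (1, 2): 3.0, (0, 2): 6.0}
    savings = lambda kv: D_depot[kv[0][0]] + D_depot[kv[0][1]] - kv[1]
    edges = dict(sorted(edges.items(), key=savings, reverse=True))
    for route in CW(edges, D_depot, demand, cap=10).run().values():
        print(route['nodes'], route['load'], route['cost'])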
| [
"[email protected]"
] | |
74af7e3ff926e3da47aa9b11384fb3e63485b7f9 | 67a48a7a2db56247fdd84474efa35124565fd8b9 | /Codeforces/1699/1699a.py | e2bac4651f38bb954ff4ec2732093493998f0ebe | [] | no_license | qazz625/Competitive-Programming-Codes | e3de31f9276f84e919a6017b2cf781c946809862 | e5df9cdc4714d78b7b6a7535ed7a45e07d3781c3 | refs/heads/master | 2022-08-30T07:57:55.172867 | 2022-08-10T08:02:07 | 2022-08-10T08:02:07 | 242,182,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | t = int(input())
for _ in range(t):
n = int(input())
if n%2:
print(-1)
else:
print(0, 0, n//2) | [
"[email protected]"
] | |
c9b1d958615432b28619694863ab1e102dfdf03a | 90bf5542f285765a523697137ad9159d6afd6cd5 | /angulo/app2.py | fba511a8d6b9559f3e8b16d4c2133718e820700d | [] | no_license | AndersonAngulo/T09.Angulo.Damian | c844f8c2c21572c35ea032d0cad5fe46a9bccef1 | 4f1398dc0f6be9e1764527e9efd92ba30160d30e | refs/heads/master | 2020-11-26T13:46:03.902961 | 2019-12-20T18:46:50 | 2019-12-20T18:46:50 | 229,091,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | #funcion2
import libreria1
import os
resultado=libreria1.saludar(os.sys.argv[1],os.sys.argv[2])
print(resultado) | [
"[email protected]"
] | |
8c9398d7e075b0b3b7ea6961cd9ea1db72ad4eb3 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17r_2_00/brocade_mpls_rpc/show_mpls_interface_detail/__init__.py | 8d04b38e68d2ceaf5dd2047baec812b4a502c53c | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,377 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import output
class show_mpls_interface_detail(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-interface-detail. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__output',)
_yang_name = 'show-mpls-interface-detail'
_rest_name = 'show-mpls-interface-detail'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__output = YANGDynClass(base=output.output, is_leaf=True, yang_name="output", rest_name="output", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='output', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-interface-detail']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-interface-detail']
def _get_output(self):
"""
Getter method for output, mapped from YANG variable /brocade_mpls_rpc/show_mpls_interface_detail/output (output)
"""
return self.__output
def _set_output(self, v, load=False):
"""
Setter method for output, mapped from YANG variable /brocade_mpls_rpc/show_mpls_interface_detail/output (output)
If this variable is read-only (config: false) in the
source YANG file, then _set_output is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_output() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=output.output, is_leaf=True, yang_name="output", rest_name="output", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='output', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """output must be of a type compatible with output""",
'defined-type': "brocade-mpls:output",
'generated-type': """YANGDynClass(base=output.output, is_leaf=True, yang_name="output", rest_name="output", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='output', is_config=True)""",
})
self.__output = t
if hasattr(self, '_set'):
self._set()
def _unset_output(self):
self.__output = YANGDynClass(base=output.output, is_leaf=True, yang_name="output", rest_name="output", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='output', is_config=True)
output = __builtin__.property(_get_output, _set_output)
_pyangbind_elements = {'output': output, }
| [
"[email protected]"
] | |
83571e8f8f52a978baf403abc8e05e2ae13a9d0c | 72983931fd4b2408399281e01f146e6cd8c38bc4 | /tests/test_transducer.py | 8a78c678aeaebb5b657621b10c589a3c417094ec | [
"Apache-2.0",
"Python-2.0"
] | permissive | templeblock/TiramisuASR | 3a356a4b72d22a9463f9a9ff8801c39a7d88f0ba | 55b1789d956a6d1821b0ebb9ba53fc41e0fda9c2 | refs/heads/master | 2022-11-26T14:30:21.247362 | 2020-08-09T07:16:07 | 2020-08-09T07:16:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,845 | py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tiramisu_asr.models.transducer import Transducer
from tiramisu_asr.featurizers.text_featurizers import TextFeaturizer
from tiramisu_asr.featurizers.speech_featurizers import TFSpeechFeaturizer, read_raw_audio
from tiramisu_asr.utils.utils import merge_features_to_channels
text_featurizer = TextFeaturizer({
"vocabulary": None,
"blank_at_zero": True,
"beam_width": 5,
"norm_score": True
})
speech_featurizer = TFSpeechFeaturizer({
"sample_rate": 16000,
"frame_ms": 25,
"stride_ms": 10,
"num_feature_bins": 80,
"feature_type": "logfbank",
"preemphasis": 0.97,
# "delta": True,
# "delta_delta": True,
"normalize_signal": True,
"normalize_feature": True,
"normalize_per_feature": False,
# "pitch": False,
})
inp = tf.keras.Input(shape=[None, 80, 1])
enc = merge_features_to_channels(inp)
enc = tf.keras.layers.LSTM(350, return_sequences=True)(enc)
enc_model = tf.keras.Model(inputs=inp, outputs=enc)
model = Transducer(
encoder=enc_model,
blank=0,
vocabulary_size=text_featurizer.num_classes,
embed_dim=350, embed_dropout=0.0, num_lstms=1, lstm_units=320, joint_dim=1024
)
model._build([1, 50, 80, 1])
model.summary(line_length=100)
model.save_weights("/tmp/transducer.h5")
model.add_featurizers(
speech_featurizer=speech_featurizer,
text_featurizer=text_featurizer
)
features = tf.random.normal(shape=[5, 50, 80, 1], stddev=127., mean=247.)
hyps = model.recognize(features)
print(hyps)
signal = read_raw_audio("/home/nlhuy/Desktop/test/11003.wav", speech_featurizer.sample_rate)
# hyps = model.recognize_tflite(signal)
#
# print(hyps)
# hyps = model.recognize_beam(tf.expand_dims(speech_featurizer.tf_extract(signal), 0))
print(hyps)
# hyps = model.recognize_beam_tflite(signal)
#
# print(hyps.numpy().decode("utf-8"))
#
concrete_func = model.recognize_tflite.get_concrete_function()
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[concrete_func]
)
converter.experimental_new_converter = True
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,
tf.lite.OpsSet.SELECT_TF_OPS]
converter.convert()
| [
"[email protected]"
] | |
1de76da91028d525dbb288656b3b8acce8276384 | b18b01b32e67433a6e749e2aae48fb69bcfc42f9 | /titanic-kaggle-competition/pipeline-components/logisticregression/regression.py | 727696deb98b29a16ba65e63a26725e66584d214 | [
"Apache-2.0"
] | permissive | kubeflow/examples | f83c920cb94b32f0271103afb74b3efaaae35b41 | 40cba72b522ca6879672dca24398973c8f0ef32d | refs/heads/master | 2023-09-01T20:39:26.577041 | 2023-08-05T16:51:33 | 2023-08-05T16:51:33 | 119,894,375 | 1,375 | 813 | Apache-2.0 | 2023-08-30T14:40:56 | 2018-02-01T21:13:10 | Jsonnet | UTF-8 | Python | false | false | 1,199 | py | import numpy as np
import pandas as pd
# import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
import argparse
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
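# Only LogisticRegression is used below; the remaining estimator imports
# appear to be unused leftovers.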
def regression(train_pickle, train_label):
train_df = pd.read_pickle(train_pickle)
train_labels = pd.read_pickle(train_label)
logreg = LogisticRegression(solver='lbfgs', max_iter=110)
logreg.fit(train_df, train_labels)
acc_log = round(logreg.score(train_df, train_labels) * 100, 2)
print('acc_log', acc_log)
with open('regression_acc.txt', 'a') as f:
f.write(str(acc_log))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train_pickle')
parser.add_argument('--train_label')
args = parser.parse_args()
    regression(args.train_pickle, args.train_label)
| [
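PLACEHOLDER_UNUSED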
"[email protected]"
] | |
1e99d4e34865e1143f39cf81363cc1bc435452b4 | bd67c037639aef544df93b83ed72b4682cfb3e30 | /playhouse/tests/test_query_results.py | eb620e4c16ed43fd52e21753c766eaac25743b32 | [
"MIT"
] | permissive | ewarman/peewee | 80d0101853454ed3fb80ed3ee521ca84ff7d263e | 6cc4248f2dcd2f5f56c199414e619527c76a4784 | refs/heads/master | 2021-01-17T17:22:06.562724 | 2016-03-23T14:51:32 | 2016-03-23T14:51:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,277 | py | import itertools
import sys
from peewee import ModelQueryResultWrapper
from peewee import NaiveQueryResultWrapper
from playhouse.tests.base import ModelTestCase
from playhouse.tests.base import skip_test_if
from playhouse.tests.base import test_db
from playhouse.tests.models import *
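# Tests for peewee's query-result machinery: result-wrapper iteration and
# caching, joined-instance construction, prefetch(), and aggregate_rows().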
class TestQueryResultWrapper(ModelTestCase):
requires = [User, Blog, Comment]
def test_iteration(self):
User.create_users(10)
with self.assertQueryCount(1):
sq = User.select()
qr = sq.execute()
first_five = []
for i, u in enumerate(qr):
first_five.append(u.username)
if i == 4:
break
self.assertEqual(first_five, ['u1', 'u2', 'u3', 'u4', 'u5'])
names = lambda it: [obj.username for obj in it]
self.assertEqual(names(sq[5:]), ['u6', 'u7', 'u8', 'u9', 'u10'])
self.assertEqual(names(sq[2:5]), ['u3', 'u4', 'u5'])
another_iter = names(qr)
self.assertEqual(another_iter, ['u%d' % i for i in range(1, 11)])
another_iter = names(qr)
self.assertEqual(another_iter, ['u%d' % i for i in range(1, 11)])
def test_count(self):
User.create_users(5)
with self.assertQueryCount(1):
query = User.select()
qr = query.execute()
self.assertEqual(qr.count, 5)
# Calling again does not incur another query.
self.assertEqual(qr.count, 5)
with self.assertQueryCount(1):
query = query.where(User.username != 'u1')
qr = query.execute()
self.assertEqual(qr.count, 4)
# Calling again does not incur another query.
self.assertEqual(qr.count, 4)
def test_len(self):
User.create_users(5)
with self.assertQueryCount(1):
query = User.select()
self.assertEqual(len(query), 5)
qr = query.execute()
self.assertEqual(len(qr), 5)
with self.assertQueryCount(1):
query = query.where(User.username != 'u1')
qr = query.execute()
self.assertEqual(len(qr), 4)
self.assertEqual(len(query), 4)
def test_nested_iteration(self):
User.create_users(4)
with self.assertQueryCount(1):
sq = User.select()
outer = []
inner = []
for i_user in sq:
outer.append(i_user.username)
for o_user in sq:
inner.append(o_user.username)
self.assertEqual(outer, ['u1', 'u2', 'u3', 'u4'])
self.assertEqual(inner, ['u1', 'u2', 'u3', 'u4'] * 4)
def test_iteration_protocol(self):
User.create_users(3)
with self.assertQueryCount(1):
query = User.select().order_by(User.id)
qr = query.execute()
for _ in range(2):
for user in qr:
pass
i = iter(qr)
for obj in i:
pass
self.assertRaises(StopIteration, next, i)
self.assertEqual([u.username for u in qr], ['u1', 'u2', 'u3'])
self.assertEqual(query[0].username, 'u1')
self.assertEqual(query[2].username, 'u3')
self.assertRaises(StopIteration, next, i)
def test_iterator(self):
User.create_users(10)
with self.assertQueryCount(1):
qr = User.select().order_by(User.id).execute()
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, ['u%d' % i for i in range(1, 11)])
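            # iterator() streams rows without filling the result cache, so the
            # wrapper ends up populated but with an empty cache.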
self.assertTrue(qr._populated)
self.assertEqual(qr._result_cache, [])
with self.assertQueryCount(0):
again = [u.username for u in qr]
self.assertEqual(again, [])
with self.assertQueryCount(1):
qr = User.select().where(User.username == 'xxx').execute()
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, [])
def test_iterator_query_method(self):
User.create_users(10)
with self.assertQueryCount(1):
qr = User.select().order_by(User.id)
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, ['u%d' % i for i in range(1, 11)])
with self.assertQueryCount(0):
again = [u.username for u in qr]
self.assertEqual(again, [])
def test_iterator_extended(self):
User.create_users(10)
for i in range(1, 4):
for j in range(i):
Blog.create(
title='blog-%s-%s' % (i, j),
user=User.get(User.username == 'u%s' % i))
qr = (User
.select(
User.username,
fn.Count(Blog.pk).alias('ct'))
.join(Blog)
.where(User.username << ['u1', 'u2', 'u3'])
.group_by(User)
.order_by(User.id)
.naive())
accum = []
with self.assertQueryCount(1):
for user in qr.iterator():
accum.append((user.username, user.ct))
self.assertEqual(accum, [
('u1', 1),
('u2', 2),
('u3', 3)])
qr = (User
.select(fn.Count(User.id).alias('ct'))
.group_by(User.username << ['u1', 'u2', 'u3'])
.order_by(fn.Count(User.id).desc()))
accum = []
with self.assertQueryCount(1):
for ct, in qr.tuples().iterator():
accum.append(ct)
self.assertEqual(accum, [7, 3])
def test_fill_cache(self):
def assertUsernames(qr, n):
self.assertEqual([u.username for u in qr._result_cache], ['u%d' % i for i in range(1, n+1)])
User.create_users(20)
with self.assertQueryCount(1):
qr = User.select().execute()
qr.fill_cache(5)
self.assertFalse(qr._populated)
assertUsernames(qr, 5)
# a subsequent call will not "over-fill"
qr.fill_cache(5)
self.assertFalse(qr._populated)
assertUsernames(qr, 5)
# ask for one more and ye shall receive
qr.fill_cache(6)
self.assertFalse(qr._populated)
assertUsernames(qr, 6)
qr.fill_cache(21)
self.assertTrue(qr._populated)
assertUsernames(qr, 20)
self.assertRaises(StopIteration, next, qr)
def test_select_related(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
c11 = Comment.create(blog=b1, comment='c11')
c12 = Comment.create(blog=b1, comment='c12')
c21 = Comment.create(blog=b2, comment='c21')
c22 = Comment.create(blog=b2, comment='c22')
# missing comment.blog_id
comments = (Comment
.select(Comment.id, Comment.comment, Blog.pk, Blog.title)
.join(Blog)
.where(Blog.title == 'b1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.blog.title for c in comments], ['b1', 'b1'])
# missing blog.pk
comments = (Comment
.select(Comment.id, Comment.comment, Comment.blog, Blog.title)
.join(Blog)
.where(Blog.title == 'b2')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.blog.title for c in comments], ['b2', 'b2'])
# both but going up 2 levels
comments = (Comment
.select(Comment, Blog, User)
.join(Blog)
.join(User)
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.comment for c in comments], ['c11', 'c12'])
self.assertEqual([c.blog.title for c in comments], ['b1', 'b1'])
self.assertEqual([c.blog.user.username for c in comments], ['u1', 'u1'])
self.assertTrue(isinstance(comments._qr, ModelQueryResultWrapper))
comments = (Comment
.select()
.join(Blog)
.join(User)
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(5):
self.assertEqual([c.blog.user.username for c in comments], ['u1', 'u1'])
self.assertTrue(isinstance(comments._qr, NaiveQueryResultWrapper))
# Go up two levels and use aliases for the joined instances.
comments = (Comment
.select(Comment, Blog, User)
.join(Blog, on=(Comment.blog == Blog.pk).alias('bx'))
.join(User, on=(Blog.user == User.id).alias('ux'))
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.comment for c in comments], ['c11', 'c12'])
self.assertEqual([c.bx.title for c in comments], ['b1', 'b1'])
self.assertEqual([c.bx.ux.username for c in comments], ['u1', 'u1'])
def test_naive(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
users = User.select().naive()
self.assertEqual([u.username for u in users], ['u1', 'u2'])
self.assertTrue(isinstance(users._qr, NaiveQueryResultWrapper))
users = User.select(User, Blog).join(Blog).naive()
self.assertEqual([u.username for u in users], ['u1', 'u2'])
self.assertEqual([u.title for u in users], ['b1', 'b2'])
query = Blog.select(Blog, User).join(User).order_by(Blog.title).naive()
self.assertEqual(query.get().user, User.get(User.username == 'u1'))
def test_tuples_dicts(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
users = User.select().tuples().order_by(User.id)
self.assertEqual([r for r in users], [
(u1.id, 'u1'),
(u2.id, 'u2'),
])
users = User.select().dicts()
self.assertEqual([r for r in users], [
{'id': u1.id, 'username': 'u1'},
{'id': u2.id, 'username': 'u2'},
])
users = User.select(User, Blog).join(Blog).order_by(User.id).tuples()
self.assertEqual([r for r in users], [
(u1.id, 'u1', b1.pk, u1.id, 'b1', '', None),
(u2.id, 'u2', b2.pk, u2.id, 'b2', '', None),
])
users = User.select(User, Blog).join(Blog).order_by(User.id).dicts()
self.assertEqual([r for r in users], [
{'id': u1.id, 'username': 'u1', 'pk': b1.pk, 'user': u1.id, 'title': 'b1', 'content': '', 'pub_date': None},
{'id': u2.id, 'username': 'u2', 'pk': b2.pk, 'user': u2.id, 'title': 'b2', 'content': '', 'pub_date': None},
])
def test_slicing_dicing(self):
def assertUsernames(users, nums):
self.assertEqual([u.username for u in users], ['u%d' % i for i in nums])
User.create_users(10)
with self.assertQueryCount(1):
uq = User.select().order_by(User.id)
for i in range(2):
res = uq[0]
self.assertEqual(res.username, 'u1')
with self.assertQueryCount(0):
for i in range(2):
res = uq[1]
self.assertEqual(res.username, 'u2')
with self.assertQueryCount(0):
for i in range(2):
res = uq[:3]
assertUsernames(res, [1, 2, 3])
with self.assertQueryCount(0):
for i in range(2):
res = uq[2:5]
assertUsernames(res, [3, 4, 5])
with self.assertQueryCount(0):
for i in range(2):
res = uq[5:]
assertUsernames(res, [6, 7, 8, 9, 10])
self.assertRaises(IndexError, uq.__getitem__, 10)
self.assertRaises(ValueError, uq.__getitem__, -1)
with self.assertQueryCount(0):
res = uq[10:]
self.assertEqual(res, [])
def test_indexing_fill_cache(self):
def assertUser(query_or_qr, idx):
self.assertEqual(query_or_qr[idx].username, 'u%d' % (idx + 1))
User.create_users(10)
uq = User.select().order_by(User.id)
with self.assertQueryCount(1):
# Ensure we can grab the first 5 users in 1 query.
for i in range(5):
assertUser(uq, i)
# Iterate in reverse and ensure only costs 1 query.
uq = User.select().order_by(User.id)
with self.assertQueryCount(1):
for i in reversed(range(10)):
assertUser(uq, i)
# Execute the query and get reference to result wrapper.
query = User.select().order_by(User.id)
query.execute()
qr = query._qr
# Getting the first user will populate the result cache with 1 obj.
assertUser(query, 0)
self.assertEqual(len(qr._result_cache), 1)
# Getting the last user will fill the cache.
assertUser(query, 9)
self.assertEqual(len(qr._result_cache), 10)
def test_prepared(self):
for i in range(2):
u = User.create(username='u%d' % i)
for j in range(2):
Blog.create(title='b%d-%d' % (i, j), user=u, content='')
for u in User.select():
# check prepared was called
self.assertEqual(u.foo, u.username)
for b in Blog.select(Blog, User).join(User):
# prepared is called for select-related instances
self.assertEqual(b.foo, b.title)
self.assertEqual(b.user.foo, b.user.username)
def test_aliasing_values(self):
User.create_users(2)
q = User.select(User.username.alias('xx')).order_by(User.username)
results = [row for row in q.dicts()]
self.assertEqual(results, [
{'xx': 'u1'},
{'xx': 'u2'}])
results = [user.xx for user in q]
self.assertEqual(results, ['u1', 'u2'])
# Force ModelQueryResultWrapper.
q = (User
.select(User.username.alias('xx'), Blog.pk)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username))
results = [user.xx for user in q]
self.assertEqual(results, ['u1', 'u2'])
# Use Model and Field aliases.
UA = User.alias()
q = (User
.select(
User.username.alias('x'),
UA.username.alias('y'))
.join(UA, on=(User.id == UA.id).alias('z'))
.order_by(User.username))
results = [(user.x, user.z.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
q = q.naive()
results = [(user.x, user.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
uq = User.select(User.id, User.username).alias('u2')
q = (User
.select(
User.username.alias('x'),
uq.c.username.alias('y'))
.join(uq, on=(User.id == uq.c.id))
.order_by(User.username))
results = [(user.x, user.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
class TestJoinedInstanceConstruction(ModelTestCase):
requires = [Blog, User, Relationship]
def setUp(self):
super(TestJoinedInstanceConstruction, self).setUp()
u1 = User.create(username='u1')
u2 = User.create(username='u2')
Blog.create(user=u1, title='b1')
Blog.create(user=u2, title='b2')
def test_fk_missing_pk(self):
# Not enough information.
with self.assertQueryCount(1):
q = (Blog
.select(Blog.title, User.username)
.join(User)
.order_by(Blog.title, User.username))
results = []
for blog in q:
results.append((blog.title, blog.user.username))
self.assertIsNone(blog.user.id)
self.assertIsNone(blog.user_id)
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
def test_fk_with_pk(self):
with self.assertQueryCount(1):
q = (Blog
.select(Blog.title, User.username, User.id)
.join(User)
.order_by(Blog.title, User.username))
results = []
for blog in q:
results.append((blog.title, blog.user.username))
self.assertIsNotNone(blog.user.id)
self.assertIsNotNone(blog.user_id)
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
def test_backref_missing_pk(self):
with self.assertQueryCount(1):
q = (User
.select(User.username, Blog.title)
.join(Blog)
.order_by(User.username, Blog.title))
results = []
for user in q:
results.append((user.username, user.blog.title))
self.assertIsNone(user.id)
self.assertIsNone(user.blog.pk)
self.assertIsNone(user.blog.user_id)
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
def test_fk_join_expr(self):
with self.assertQueryCount(1):
q = (User
.select(User.username, Blog.title)
.join(Blog, on=(User.id == Blog.user).alias('bx'))
.order_by(User.username))
results = []
for user in q:
results.append((user.username, user.bx.title))
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
with self.assertQueryCount(1):
q = (Blog
.select(Blog.title, User.username)
.join(User, on=(Blog.user == User.id).alias('ux'))
.order_by(Blog.title))
results = []
for blog in q:
results.append((blog.title, blog.ux.username))
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
def test_aliases(self):
B = Blog.alias()
U = User.alias()
with self.assertQueryCount(1):
q = (U.select(U.username, B.title)
.join(B, on=(U.id == B.user))
.order_by(U.username))
results = []
for user in q:
results.append((user.username, user.blog.title))
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
with self.assertQueryCount(1):
q = (B.select(B.title, U.username)
.join(U, on=(B.user == U.id))
.order_by(B.title))
results = []
for blog in q:
results.append((blog.title, blog.user.username))
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
# No explicit join condition.
with self.assertQueryCount(1):
q = (B.select(B.title, U.username)
.join(U, on=B.user)
.order_by(B.title))
results = [(blog.title, blog.user.username) for blog in q]
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
# No explicit condition, backref.
Blog.create(user=User.get(User.username == 'u2'), title='b2-2')
with self.assertQueryCount(1):
q = (U.select(U.username, B.title)
.join(B, on=B.user)
.order_by(U.username, B.title))
results = [(user.username, user.blog.title) for user in q]
self.assertEqual(
results,
[('u1', 'b1'), ('u2', 'b2'), ('u2', 'b2-2')])
def test_subqueries(self):
uq = User.select()
bq = Blog.select(Blog.title, Blog.user).alias('bq')
with self.assertQueryCount(1):
q = (User
.select(User, bq.c.title.bind_to(Blog))
.join(bq, on=(User.id == bq.c.user_id).alias('blog'))
.order_by(User.username))
results = []
for user in q:
results.append((user.username, user.blog.title))
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
def test_multiple_joins(self):
Blog.delete().execute()
User.delete().execute()
users = [User.create(username='u%s' % i) for i in range(4)]
for from_user, to_user in itertools.combinations(users, 2):
Relationship.create(from_user=from_user, to_user=to_user)
with self.assertQueryCount(1):
ToUser = User.alias()
q = (Relationship
.select(Relationship, User, ToUser)
.join(User, on=Relationship.from_user)
.switch(Relationship)
.join(ToUser, on=Relationship.to_user)
.order_by(User.username, ToUser.username))
results = [(r.from_user.username, r.to_user.username) for r in q]
self.assertEqual(results, [
('u0', 'u1'),
('u0', 'u2'),
('u0', 'u3'),
('u1', 'u2'),
('u1', 'u3'),
('u2', 'u3'),
])
with self.assertQueryCount(1):
ToUser = User.alias()
q = (Relationship
.select(Relationship, User, ToUser)
.join(User,
on=(Relationship.from_user == User.id))
.switch(Relationship)
.join(ToUser,
on=(Relationship.to_user == ToUser.id).alias('to_user'))
.order_by(User.username, ToUser.username))
results = [(r.from_user.username, r.to_user.username) for r in q]
self.assertEqual(results, [
('u0', 'u1'),
('u0', 'u2'),
('u0', 'u3'),
('u1', 'u2'),
('u1', 'u3'),
('u2', 'u3'),
])
class TestQueryResultTypeConversion(ModelTestCase):
requires = [User]
def setUp(self):
super(TestQueryResultTypeConversion, self).setUp()
for i in range(3):
User.create(username='u%d' % i)
def assertNames(self, query, expected, attr='username'):
id_field = query.model_class.id
self.assertEqual(
[getattr(item, attr) for item in query.order_by(id_field)],
expected)
def test_simple_select(self):
query = UpperUser.select()
self.assertNames(query, ['U0', 'U1', 'U2'])
query = User.select()
self.assertNames(query, ['u0', 'u1', 'u2'])
def test_with_alias(self):
# Even when aliased to a different attr, the column is coerced.
query = UpperUser.select(UpperUser.username.alias('foo'))
self.assertNames(query, ['U0', 'U1', 'U2'], 'foo')
def test_scalar(self):
max_username = (UpperUser
.select(fn.Max(UpperUser.username))
.scalar(convert=True))
self.assertEqual(max_username, 'U2')
max_username = (UpperUser
.select(fn.Max(UpperUser.username))
.scalar())
self.assertEqual(max_username, 'u2')
def test_function(self):
substr = fn.SubStr(UpperUser.username, 1, 3)
# Being the first parameter of the function, it meets the special-case
# criteria.
query = UpperUser.select(substr.alias('foo'))
self.assertNames(query, ['U0', 'U1', 'U2'], 'foo')
query = UpperUser.select(substr.coerce(False).alias('foo'))
self.assertNames(query, ['u0', 'u1', 'u2'], 'foo')
query = UpperUser.select(substr.coerce(False).alias('username'))
self.assertNames(query, ['u0', 'u1', 'u2'])
query = UpperUser.select(fn.Lower(UpperUser.username).alias('username'))
self.assertNames(query, ['U0', 'U1', 'U2'])
query = UpperUser.select(
fn.Lower(UpperUser.username).alias('username').coerce(False))
self.assertNames(query, ['u0', 'u1', 'u2'])
# Since it is aliased to an existing column, we will use that column's
# coerce.
query = UpperUser.select(
fn.SubStr(fn.Lower(UpperUser.username), 1, 3).alias('username'))
self.assertNames(query, ['U0', 'U1', 'U2'])
query = UpperUser.select(
fn.SubStr(fn.Lower(UpperUser.username), 1, 3).alias('foo'))
self.assertNames(query, ['u0', 'u1', 'u2'], 'foo')
class TestModelQueryResultWrapper(ModelTestCase):
requires = [TestModelA, TestModelB, TestModelC, User, Blog]
data = (
(TestModelA, (
('pk1', 'a1'),
('pk2', 'a2'),
('pk3', 'a3'))),
(TestModelB, (
('pk1', 'b1'),
('pk2', 'b2'),
('pk3', 'b3'))),
(TestModelC, (
('pk1', 'c1'),
('pk2', 'c2'))),
)
def setUp(self):
super(TestModelQueryResultWrapper, self).setUp()
for model_class, model_data in self.data:
for pk, data in model_data:
model_class.create(field=pk, data=data)
def test_join_expr(self):
def get_query(join_type=JOIN.INNER):
sq = (TestModelA
.select(TestModelA, TestModelB, TestModelC)
.join(
TestModelB,
on=(TestModelA.field == TestModelB.field).alias('rel_b'))
.join(
TestModelC,
join_type=join_type,
on=(TestModelB.field == TestModelC.field))
.order_by(TestModelA.field))
return sq
sq = get_query()
self.assertEqual(sq.count(), 2)
with self.assertQueryCount(1):
results = list(sq)
expected = (('b1', 'c1'), ('b2', 'c2'))
for i, (b_data, c_data) in enumerate(expected):
self.assertEqual(results[i].rel_b.data, b_data)
self.assertEqual(results[i].rel_b.field.data, c_data)
sq = get_query(JOIN.LEFT_OUTER)
self.assertEqual(sq.count(), 3)
with self.assertQueryCount(1):
results = list(sq)
expected = (('b1', 'c1'), ('b2', 'c2'), ('b3', None))
for i, (b_data, c_data) in enumerate(expected):
self.assertEqual(results[i].rel_b.data, b_data)
self.assertEqual(results[i].rel_b.field.data, c_data)
def test_backward_join(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
for user in (u1, u2):
Blog.create(title='b-%s' % user.username, user=user)
# Create an additional blog for user 2.
Blog.create(title='b-u2-2', user=u2)
res = (User
.select(User.username, Blog.title)
.join(Blog)
.order_by(User.username.asc(), Blog.title.asc()))
self.assertEqual([(u.username, u.blog.title) for u in res], [
('u1', 'b-u1'),
('u2', 'b-u2'),
('u2', 'b-u2-2')])
def test_joins_with_aliases(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1_1 = Blog.create(user=u1, title='b1-1')
b1_2 = Blog.create(user=u1, title='b1-2')
b2_1 = Blog.create(user=u2, title='b2-1')
UserAlias = User.alias()
BlogAlias = Blog.alias()
def assertExpectedQuery(query, is_user_query):
accum = []
with self.assertQueryCount(1):
if is_user_query:
for user in query:
accum.append((user.username, user.blog.title))
else:
for blog in query:
accum.append((blog.user.username, blog.title))
self.assertEqual(accum, [
('u1', 'b1-1'),
('u1', 'b1-2'),
('u2', 'b2-1'),
])
combinations = [
(User, BlogAlias, User.id == BlogAlias.user, True),
(User, BlogAlias, BlogAlias.user == User.id, True),
(User, Blog, User.id == Blog.user, True),
(User, Blog, Blog.user == User.id, True),
(User, Blog, None, True),
(Blog, UserAlias, UserAlias.id == Blog.user, False),
(Blog, UserAlias, Blog.user == UserAlias.id, False),
(Blog, User, User.id == Blog.user, False),
(Blog, User, Blog.user == User.id, False),
(Blog, User, None, False),
]
for Src, JoinModel, predicate, is_user_query in combinations:
query = (Src
.select(Src, JoinModel)
.join(JoinModel, on=predicate)
.order_by(SQL('1, 2')))
assertExpectedQuery(query, is_user_query)
class TestModelQueryResultForeignKeys(ModelTestCase):
requires = [Parent, Child]
def test_foreign_key_assignment(self):
parent = Parent.create(data='p1')
child = Child.create(parent=parent, data='c1')
ParentAlias = Parent.alias()
query = Child.select(Child, ParentAlias)
ljoin = (ParentAlias.id == Child.parent)
rjoin = (Child.parent == ParentAlias.id)
lhs_alias = query.join(ParentAlias, on=ljoin)
rhs_alias = query.join(ParentAlias, on=rjoin)
self.assertJoins(lhs_alias, [
'INNER JOIN "parent" AS parent '
'ON ("parent"."id" = "child"."parent_id")'])
self.assertJoins(rhs_alias, [
'INNER JOIN "parent" AS parent '
'ON ("child"."parent_id" = "parent"."id")'])
with self.assertQueryCount(1):
lchild = lhs_alias.get()
self.assertEqual(lchild.id, child.id)
self.assertEqual(lchild.parent.id, parent.id)
with self.assertQueryCount(1):
rchild = rhs_alias.get()
self.assertEqual(rchild.id, child.id)
self.assertEqual(rchild.parent.id, parent.id)
class TestSelectRelatedForeignKeyToNonPrimaryKey(ModelTestCase):
requires = [Package, PackageItem]
def test_select_related(self):
p1 = Package.create(barcode='101')
p2 = Package.create(barcode='102')
pi11 = PackageItem.create(title='p11', package='101')
pi12 = PackageItem.create(title='p12', package='101')
pi21 = PackageItem.create(title='p21', package='102')
pi22 = PackageItem.create(title='p22', package='102')
# missing PackageItem.package_id.
with self.assertQueryCount(1):
items = (PackageItem
.select(
PackageItem.id, PackageItem.title, Package.barcode)
.join(Package)
.where(Package.barcode == '101')
.order_by(PackageItem.id))
self.assertEqual(
[i.package.barcode for i in items],
['101', '101'])
with self.assertQueryCount(1):
items = (PackageItem
.select(
PackageItem.id, PackageItem.title, PackageItem.package, Package.id)
.join(Package)
.where(Package.barcode == '101')
.order_by(PackageItem.id))
self.assertEqual([i.package.id for i in items], [p1.id, p1.id])
class BaseTestPrefetch(ModelTestCase):
requires = [
User,
Blog,
Comment,
Parent,
Child,
Orphan,
ChildPet,
OrphanPet,
Category,
Post,
Tag,
TagPostThrough,
TagPostThroughAlt,
Category,
UserCategory,
Relationship,
SpecialComment,
]
user_data = [
('u1', (('b1', ('b1-c1', 'b1-c2')), ('b2', ('b2-c1',)))),
('u2', ()),
('u3', (('b3', ('b3-c1', 'b3-c2')), ('b4', ()))),
('u4', (('b5', ('b5-c1', 'b5-c2')), ('b6', ('b6-c1',)))),
]
parent_data = [
('p1', (
# children
(
('c1', ('c1-p1', 'c1-p2')),
('c2', ('c2-p1',)),
('c3', ('c3-p1',)),
('c4', ()),
),
# orphans
(
('o1', ('o1-p1', 'o1-p2')),
('o2', ('o2-p1',)),
('o3', ('o3-p1',)),
('o4', ()),
),
)),
('p2', ((), ())),
('p3', (
# children
(
('c6', ()),
('c7', ('c7-p1',)),
),
# orphans
(
('o6', ('o6-p1', 'o6-p2')),
('o7', ('o7-p1',)),
),
)),
]
category_tree = [
['root', ['p1', 'p2']],
['p1', ['p1-1', 'p1-2']],
['p2', ['p2-1', 'p2-2']],
['p1-1', []],
['p1-2', []],
['p2-1', []],
['p2-2', []],
]
def setUp(self):
super(BaseTestPrefetch, self).setUp()
for parent, (children, orphans) in self.parent_data:
p = Parent.create(data=parent)
for child_pets in children:
child, pets = child_pets
c = Child.create(parent=p, data=child)
for pet in pets:
ChildPet.create(child=c, data=pet)
for orphan_pets in orphans:
orphan, pets = orphan_pets
o = Orphan.create(parent=p, data=orphan)
for pet in pets:
OrphanPet.create(orphan=o, data=pet)
for user, blog_comments in self.user_data:
u = User.create(username=user)
for blog, comments in blog_comments:
b = Blog.create(user=u, title=blog, content='')
for c in comments:
Comment.create(blog=b, comment=c)
def _build_category_tree(self):
def cc(name, parent=None):
return Category.create(name=name, parent=parent)
root = cc('root')
p1 = cc('p1', root)
p2 = cc('p2', root)
for p in (p1, p2):
for i in range(2):
cc('%s-%s' % (p.name, i + 1), p)
class TestPrefetch(BaseTestPrefetch):
def test_prefetch_simple(self):
sq = User.select().where(User.username != 'u3')
sq2 = Blog.select().where(Blog.title != 'b2')
sq3 = Comment.select()
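        # prefetch() issues exactly one query per subquery passed in (3 here).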
with self.assertQueryCount(3):
prefetch_sq = prefetch(sq, sq2, sq3)
results = []
for user in prefetch_sq:
results.append(user.username)
for blog in user.blog_set_prefetch:
results.append(blog.title)
for comment in blog.comments_prefetch:
results.append(comment.comment)
self.assertEqual(results, [
'u1', 'b1', 'b1-c1', 'b1-c2',
'u2',
'u4', 'b5', 'b5-c1', 'b5-c2', 'b6', 'b6-c1',
])
with self.assertQueryCount(0):
results = []
for user in prefetch_sq:
for blog in user.blog_set_prefetch:
results.append(blog.user.username)
for comment in blog.comments_prefetch:
results.append(comment.blog.title)
self.assertEqual(results, [
'u1', 'b1', 'b1', 'u4', 'b5', 'b5', 'u4', 'b6',
])
def test_prefetch_reverse(self):
sq = User.select()
sq2 = Blog.select().where(Blog.title != 'b2').order_by(Blog.pk)
with self.assertQueryCount(2):
prefetch_sq = prefetch(sq2, sq)
results = []
for blog in prefetch_sq:
results.append(blog.title)
results.append(blog.user.username)
self.assertEqual(results, [
'b1', 'u1',
'b3', 'u3',
'b4', 'u3',
'b5', 'u4',
'b6', 'u4'])
def test_prefetch_up_and_down(self):
blogs = Blog.select(Blog, User).join(User).order_by(Blog.title)
comments = Comment.select().order_by(Comment.comment.desc())
with self.assertQueryCount(2):
query = prefetch(blogs, comments)
results = []
for blog in query:
results.append((
blog.user.username,
blog.title,
[comment.comment for comment in blog.comments_prefetch]))
self.assertEqual(results, [
('u1', 'b1', ['b1-c2', 'b1-c1']),
('u1', 'b2', ['b2-c1']),
('u3', 'b3', ['b3-c2', 'b3-c1']),
('u3', 'b4', []),
('u4', 'b5', ['b5-c2', 'b5-c1']),
('u4', 'b6', ['b6-c1']),
])
def test_prefetch_multi_depth(self):
sq = Parent.select()
sq2 = Child.select()
sq3 = Orphan.select()
sq4 = ChildPet.select()
sq5 = OrphanPet.select()
with self.assertQueryCount(5):
prefetch_sq = prefetch(sq, sq2, sq3, sq4, sq5)
results = []
for parent in prefetch_sq:
results.append(parent.data)
for child in parent.child_set_prefetch:
results.append(child.data)
for pet in child.childpet_set_prefetch:
results.append(pet.data)
for orphan in parent.orphan_set_prefetch:
results.append(orphan.data)
for pet in orphan.orphanpet_set_prefetch:
results.append(pet.data)
self.assertEqual(results, [
'p1', 'c1', 'c1-p1', 'c1-p2', 'c2', 'c2-p1', 'c3', 'c3-p1', 'c4',
'o1', 'o1-p1', 'o1-p2', 'o2', 'o2-p1', 'o3', 'o3-p1', 'o4',
'p2',
'p3', 'c6', 'c7', 'c7-p1', 'o6', 'o6-p1', 'o6-p2', 'o7', 'o7-p1',
])
def test_prefetch_no_aggregate(self):
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username, Blog.title))
results = []
for user in query:
results.append((
user.username,
user.blog.title))
self.assertEqual(results, [
('u1', 'b1'),
('u1', 'b2'),
('u2', None),
('u3', 'b3'),
('u3', 'b4'),
('u4', 'b5'),
('u4', 'b6'),
])
def test_prefetch_self_join(self):
self._build_category_tree()
Child = Category.alias()
with self.assertQueryCount(2):
query = prefetch(Category.select().order_by(Category.id), Child)
names_and_children = [
[parent.name, [child.name for child in parent.children_prefetch]]
for parent in query]
self.assertEqual(names_and_children, self.category_tree)
def test_prefetch_specific_model(self):
# User -> Blog
# -> SpecialComment (fk to user and blog)
Comment.delete().execute()
Blog.delete().execute()
User.delete().execute()
u1 = User.create(username='u1')
u2 = User.create(username='u2')
for i in range(1, 3):
for user in (u1, u2):
b = Blog.create(user=user, title='%s-b%s' % (user.username, i))
SpecialComment.create(
user=user,
blog=b,
name='%s-c%s' % (user.username, i))
u3 = User.create(username='u3')
SpecialComment.create(user=u3, name='u3-c1')
u4 = User.create(username='u4')
Blog.create(user=u4, title='u4-b1')
u5 = User.create(username='u5')
with self.assertQueryCount(3):
user_pf = prefetch(
User.select(),
Blog,
(SpecialComment, User))
results = []
for user in user_pf:
results.append((
user.username,
[b.title for b in user.blog_set_prefetch],
[c.name for c in user.special_comments_prefetch]))
self.assertEqual(results, [
('u1', ['u1-b1', 'u1-b2'], ['u1-c1', 'u1-c2']),
('u2', ['u2-b1', 'u2-b2'], ['u2-c1', 'u2-c2']),
('u3', [], ['u3-c1']),
('u4', ['u4-b1'], []),
('u5', [], []),
])
class TestPrefetchMultipleFKs(ModelTestCase):
requires = [
User,
Blog,
Relationship,
]
def create_users(self):
names = ['charlie', 'huey', 'zaizee']
return [User.create(username=username) for username in names]
def create_relationships(self, charlie, huey, zaizee):
r1 = Relationship.create(from_user=charlie, to_user=huey)
r2 = Relationship.create(from_user=charlie, to_user=zaizee)
r3 = Relationship.create(from_user=huey, to_user=charlie)
r4 = Relationship.create(from_user=zaizee, to_user=charlie)
return r1, r2, r3, r4
def test_multiple_fks(self):
charlie, huey, zaizee = self.create_users()
r1, r2, r3, r4 = self.create_relationships(charlie, huey, zaizee)
def assertRelationships(attr, values):
for relationship, value in zip(attr, values):
self.assertEqual(relationship._data, value)
with self.assertQueryCount(2):
users = User.select().order_by(User.id)
relationships = Relationship.select()
query = prefetch(users, relationships)
results = [row for row in query]
self.assertEqual(len(results), 3)
cp, hp, zp = results
assertRelationships(cp.relationships_prefetch, [
{'id': r1.id, 'from_user': charlie.id, 'to_user': huey.id},
{'id': r2.id, 'from_user': charlie.id, 'to_user': zaizee.id}])
assertRelationships(cp.related_to_prefetch, [
{'id': r3.id, 'from_user': huey.id, 'to_user': charlie.id},
{'id': r4.id, 'from_user': zaizee.id, 'to_user': charlie.id}])
assertRelationships(hp.relationships_prefetch, [
{'id': r3.id, 'from_user': huey.id, 'to_user': charlie.id}])
assertRelationships(hp.related_to_prefetch, [
{'id': r1.id, 'from_user': charlie.id, 'to_user': huey.id}])
assertRelationships(zp.relationships_prefetch, [
{'id': r4.id, 'from_user': zaizee.id, 'to_user': charlie.id}])
assertRelationships(zp.related_to_prefetch, [
{'id': r2.id, 'from_user': charlie.id, 'to_user': zaizee.id}])
def test_prefetch_multiple_fk_reverse(self):
charlie, huey, zaizee = self.create_users()
r1, r2, r3, r4 = self.create_relationships(charlie, huey, zaizee)
with self.assertQueryCount(2):
relationships = Relationship.select().order_by(Relationship.id)
users = User.select()
query = prefetch(relationships, users)
results = [row for row in query]
self.assertEqual(len(results), 4)
expected = (
('charlie', 'huey'),
('charlie', 'zaizee'),
('huey', 'charlie'),
('zaizee', 'charlie'))
for (from_user, to_user), relationship in zip(expected, results):
self.assertEqual(relationship.from_user.username, from_user)
self.assertEqual(relationship.to_user.username, to_user)
class TestPrefetchThroughM2M(ModelTestCase):
requires = [User, Note, Flag, NoteFlag]
test_data = [
('charlie', [
('rewrite peewee', ['todo']),
('rice desktop', ['done']),
('test peewee', ['todo', 'urgent']),
('write window-manager', [])]),
('huey', [
('bite mickey', []),
('scratch furniture', ['todo', 'urgent']),
('vomit on carpet', ['done'])]),
('zaizee', []),
]
def setUp(self):
super(TestPrefetchThroughM2M, self).setUp()
with test_db.atomic():
for username, note_data in self.test_data:
user = User.create(username=username)
for note, flags in note_data:
self.create_note(user, note, *flags)
def create_note(self, user, text, *flags):
note = Note.create(user=user, text=text)
for flag in flags:
try:
flag = Flag.get(Flag.label == flag)
except Flag.DoesNotExist:
flag = Flag.create(label=flag)
NoteFlag.create(note=note, flag=flag)
return note
def test_prefetch_through_m2m(self):
# One query for each table being prefetched.
with self.assertQueryCount(4):
users = User.select()
notes = Note.select().order_by(Note.text)
flags = Flag.select().order_by(Flag.label)
query = prefetch(users, notes, NoteFlag, flags)
accum = []
for user in query:
notes = []
for note in user.notes_prefetch:
flags = []
for nf in note.flags_prefetch:
self.assertEqual(nf.note_id, note.id)
self.assertEqual(nf.note.id, note.id)
flags.append(nf.flag.label)
notes.append((note.text, flags))
accum.append((user.username, notes))
self.assertEqual(self.test_data, accum)
def test_aggregate_through_m2m(self):
with self.assertQueryCount(1):
query = (User
.select(User, Note, NoteFlag, Flag)
.join(Note, JOIN.LEFT_OUTER)
.join(NoteFlag, JOIN.LEFT_OUTER)
.join(Flag, JOIN.LEFT_OUTER)
.order_by(User.id, Note.text, Flag.label)
.aggregate_rows())
accum = []
for user in query:
notes = []
for note in user.notes:
flags = []
for nf in note.flags:
self.assertEqual(nf.note_id, note.id)
flags.append(nf.flag.label)
notes.append((note.text, flags))
accum.append((user.username, notes))
self.assertEqual(self.test_data, accum)
class TestAggregateRows(BaseTestPrefetch):
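    # aggregate_rows() reconstructs the object graph from a single joined
    # query, instead of issuing one query per related model like prefetch().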
def test_aggregate_users(self):
with self.assertQueryCount(1):
query = (User
.select(User, Blog, Comment)
.join(Blog, JOIN.LEFT_OUTER)
.join(Comment, JOIN.LEFT_OUTER)
.order_by(User.username, Blog.title, Comment.id)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[(blog.title,
[comment.comment for comment in blog.comments])
for blog in user.blog_set]))
self.assertEqual(results, [
('u1', [
('b1', ['b1-c1', 'b1-c2']),
('b2', ['b2-c1'])]),
('u2', []),
('u3', [
('b3', ['b3-c1', 'b3-c2']),
('b4', [])]),
('u4', [
('b5', ['b5-c1', 'b5-c2']),
('b6', ['b6-c1'])]),
])
def test_aggregate_blogs(self):
with self.assertQueryCount(1):
query = (Blog
.select(Blog, User, Comment)
.join(User)
.switch(Blog)
.join(Comment, JOIN.LEFT_OUTER)
.order_by(Blog.title, User.username, Comment.id)
.aggregate_rows())
results = []
for blog in query:
results.append((
blog.user.username,
blog.title,
[comment.comment for comment in blog.comments]))
self.assertEqual(results, [
('u1', 'b1', ['b1-c1', 'b1-c2']),
('u1', 'b2', ['b2-c1']),
('u3', 'b3', ['b3-c1', 'b3-c2']),
('u3', 'b4', []),
('u4', 'b5', ['b5-c1', 'b5-c2']),
('u4', 'b6', ['b6-c1']),
])
def test_aggregate_on_expression_join(self):
with self.assertQueryCount(1):
join_expr = (User.id == Blog.user)
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER, on=join_expr)
.order_by(User.username, Blog.title)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[blog.title for blog in user.blog_set]))
self.assertEqual(results, [
('u1', ['b1', 'b2']),
('u2', []),
('u3', ['b3', 'b4']),
('u4', ['b5', 'b6']),
])
def test_aggregate_with_join_model_aliases(self):
expected = [
('u1', ['b1', 'b2']),
('u2', []),
('u3', ['b3', 'b4']),
('u4', ['b5', 'b6']),
]
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(
Blog,
JOIN.LEFT_OUTER,
on=(User.id == Blog.user).alias('blogz'))
.order_by(User.id, Blog.title)
.aggregate_rows())
results = [
(user.username, [blog.title for blog in user.blogz])
for user in query]
self.assertEqual(results, expected)
BlogAlias = Blog.alias()
with self.assertQueryCount(1):
query = (User
.select(User, BlogAlias)
.join(
BlogAlias,
JOIN.LEFT_OUTER,
on=(User.id == BlogAlias.user).alias('blogz'))
.order_by(User.id, BlogAlias.title)
.aggregate_rows())
results = [
(user.username, [blog.title for blog in user.blogz])
for user in query]
self.assertEqual(results, expected)
def test_aggregate_unselected_join_backref(self):
cat_1 = Category.create(name='category 1')
cat_2 = Category.create(name='category 2')
with test_db.transaction():
for i, user in enumerate(User.select().order_by(User.username)):
if i % 2 == 0:
category = cat_2
else:
category = cat_1
UserCategory.create(user=user, category=category)
with self.assertQueryCount(1):
# The join on UserCategory is a backref join (since the FK is on
# UserCategory). Additionally, UserCategory/Category are not
# selected and are only used for filtering the result set.
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER)
.switch(User)
.join(UserCategory)
.join(Category)
.where(Category.name == cat_1.name)
.order_by(User.username, Blog.title)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[blog.title for blog in user.blog_set]))
self.assertEqual(results, [
('u2', []),
('u4', ['b5', 'b6']),
])
def test_aggregate_manytomany(self):
p1 = Post.create(title='p1')
p2 = Post.create(title='p2')
Post.create(title='p3')
p4 = Post.create(title='p4')
t1 = Tag.create(tag='t1')
t2 = Tag.create(tag='t2')
t3 = Tag.create(tag='t3')
TagPostThroughAlt.create(tag=t1, post=p1)
TagPostThroughAlt.create(tag=t2, post=p1)
TagPostThroughAlt.create(tag=t2, post=p2)
TagPostThroughAlt.create(tag=t3, post=p2)
TagPostThroughAlt.create(tag=t1, post=p4)
TagPostThroughAlt.create(tag=t2, post=p4)
TagPostThroughAlt.create(tag=t3, post=p4)
with self.assertQueryCount(1):
query = (Post
.select(Post, TagPostThroughAlt, Tag)
.join(TagPostThroughAlt, JOIN.LEFT_OUTER)
.join(Tag, JOIN.LEFT_OUTER)
.order_by(Post.id, TagPostThroughAlt.post, Tag.id)
.aggregate_rows())
results = []
for post in query:
post_data = [post.title]
for tpt in post.tags_alt:
post_data.append(tpt.tag.tag)
results.append(post_data)
self.assertEqual(results, [
['p1', 't1', 't2'],
['p2', 't2', 't3'],
['p3'],
['p4', 't1', 't2', 't3'],
])
def test_aggregate_parent_child(self):
with self.assertQueryCount(1):
query = (Parent
.select(Parent, Child, Orphan, ChildPet, OrphanPet)
.join(Child, JOIN.LEFT_OUTER)
.join(ChildPet, JOIN.LEFT_OUTER)
.switch(Parent)
.join(Orphan, JOIN.LEFT_OUTER)
.join(OrphanPet, JOIN.LEFT_OUTER)
.order_by(
Parent.data,
Child.data,
ChildPet.id,
Orphan.data,
OrphanPet.id)
.aggregate_rows())
results = []
for parent in query:
results.append((
parent.data,
[(child.data, [pet.data for pet in child.childpet_set])
for child in parent.child_set],
[(orphan.data, [pet.data for pet in orphan.orphanpet_set])
for orphan in parent.orphan_set]
))
# Without the `.aggregate_rows()` call, this would be 289!!
self.assertEqual(results, [
('p1',
[('c1', ['c1-p1', 'c1-p2']),
('c2', ['c2-p1']),
('c3', ['c3-p1']),
('c4', [])],
[('o1', ['o1-p1', 'o1-p2']),
('o2', ['o2-p1']),
('o3', ['o3-p1']),
('o4', [])],
),
('p2', [], []),
('p3',
[('c6', []),
('c7', ['c7-p1'])],
[('o6', ['o6-p1', 'o6-p2']),
('o7', ['o7-p1'])],)
])
def test_aggregate_with_unselected_joins(self):
with self.assertQueryCount(1):
query = (Child
.select(Child, ChildPet, Parent)
.join(ChildPet, JOIN.LEFT_OUTER)
.switch(Child)
.join(Parent)
.join(Orphan)
.join(OrphanPet)
.where(OrphanPet.data == 'o6-p2')
.order_by(Child.data, ChildPet.data)
.aggregate_rows())
results = []
for child in query:
results.append((
child.data,
child.parent.data,
[child_pet.data for child_pet in child.childpet_set]))
self.assertEqual(results, [
('c6', 'p3', []),
('c7', 'p3', ['c7-p1']),
])
with self.assertQueryCount(1):
query = (Parent
.select(Parent, Child, ChildPet)
.join(Child, JOIN.LEFT_OUTER)
.join(ChildPet, JOIN.LEFT_OUTER)
.switch(Parent)
.join(Orphan)
.join(OrphanPet)
.where(OrphanPet.data == 'o6-p2')
.order_by(Parent.data, Child.data, ChildPet.data)
.aggregate_rows())
results = []
for parent in query:
results.append((
parent.data,
[(child.data, [pet.data for pet in child.childpet_set])
for child in parent.child_set]))
self.assertEqual(results, [('p3', [
('c6', []),
('c7', ['c7-p1']),
])])
def test_aggregate_rows_ordering(self):
# Refs github #519.
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username.desc(), Blog.title.desc())
.aggregate_rows())
accum = []
for user in query:
accum.append((
user.username,
[blog.title for blog in user.blog_set]))
if sys.version_info[:2] > (2, 6):
self.assertEqual(accum, [
('u4', ['b6', 'b5']),
('u3', ['b4', 'b3']),
('u2', []),
('u1', ['b2', 'b1']),
])
def test_aggregate_rows_self_join(self):
self._build_category_tree()
Child = Category.alias()
# Same query, but this time use an `alias` on the join expr.
with self.assertQueryCount(1):
query = (Category
.select(Category, Child)
.join(
Child,
JOIN.LEFT_OUTER,
on=(Category.id == Child.parent).alias('childrenx'))
.order_by(Category.id, Child.id)
.aggregate_rows())
names_and_children = [
[parent.name, [child.name for child in parent.childrenx]]
for parent in query]
self.assertEqual(names_and_children, self.category_tree)
def test_multiple_fks(self):
names = ['charlie', 'huey', 'zaizee']
charlie, huey, zaizee = [
User.create(username=username) for username in names]
Relationship.create(from_user=charlie, to_user=huey)
Relationship.create(from_user=charlie, to_user=zaizee)
Relationship.create(from_user=huey, to_user=charlie)
Relationship.create(from_user=zaizee, to_user=charlie)
UserAlias = User.alias()
with self.assertQueryCount(1):
query = (User
.select(User, Relationship, UserAlias)
.join(
Relationship,
JOIN.LEFT_OUTER,
on=Relationship.from_user)
.join(
UserAlias,
on=(
Relationship.to_user == UserAlias.id
).alias('to_user'))
.order_by(User.username, Relationship.id)
.where(User.username == 'charlie')
.aggregate_rows())
results = [row for row in query]
self.assertEqual(len(results), 1)
user = results[0]
self.assertEqual(user.username, 'charlie')
self.assertEqual(len(user.relationships), 2)
rh, rz = user.relationships
self.assertEqual(rh.to_user.username, 'huey')
self.assertEqual(rz.to_user.username, 'zaizee')
FromUser = User.alias()
ToUser = User.alias()
from_join = (Relationship.from_user == FromUser.id)
to_join = (Relationship.to_user == ToUser.id)
with self.assertQueryCount(1):
query = (Relationship
.select(Relationship, FromUser, ToUser)
.join(FromUser, on=from_join.alias('from_user'))
.switch(Relationship)
.join(ToUser, on=to_join.alias('to_user'))
.order_by(Relationship.id)
.aggregate_rows())
results = [
(relationship.from_user.username,
relationship.to_user.username)
for relationship in query]
self.assertEqual(results, [
('charlie', 'huey'),
('charlie', 'zaizee'),
('huey', 'charlie'),
('zaizee', 'charlie'),
])
def test_multiple_fks_multi_depth(self):
names = ['charlie', 'huey', 'zaizee']
charlie, huey, zaizee = [
User.create(username=username) for username in names]
Relationship.create(from_user=charlie, to_user=huey)
Relationship.create(from_user=charlie, to_user=zaizee)
Relationship.create(from_user=huey, to_user=charlie)
Relationship.create(from_user=zaizee, to_user=charlie)
human = Category.create(name='human')
kitty = Category.create(name='kitty')
UserCategory.create(user=charlie, category=human)
UserCategory.create(user=huey, category=kitty)
UserCategory.create(user=zaizee, category=kitty)
FromUser = User.alias()
ToUser = User.alias()
from_join = (Relationship.from_user == FromUser.id)
to_join = (Relationship.to_user == ToUser.id)
FromUserCategory = UserCategory.alias()
ToUserCategory = UserCategory.alias()
from_uc_join = (FromUser.id == FromUserCategory.user)
to_uc_join = (ToUser.id == ToUserCategory.user)
FromCategory = Category.alias()
ToCategory = Category.alias()
from_c_join = (FromUserCategory.category == FromCategory.id)
to_c_join = (ToUserCategory.category == ToCategory.id)
with self.assertQueryCount(1):
query = (Relationship
.select(
Relationship,
FromUser,
ToUser,
FromUserCategory,
ToUserCategory,
FromCategory,
ToCategory)
.join(FromUser, on=from_join.alias('from_user'))
.join(FromUserCategory, on=from_uc_join.alias('fuc'))
.join(FromCategory, on=from_c_join.alias('category'))
.switch(Relationship)
.join(ToUser, on=to_join.alias('to_user'))
.join(ToUserCategory, on=to_uc_join.alias('tuc'))
.join(ToCategory, on=to_c_join.alias('category'))
.order_by(Relationship.id)
.aggregate_rows())
results = []
for obj in query:
from_user = obj.from_user
to_user = obj.to_user
results.append((
from_user.username,
from_user.fuc[0].category.name,
to_user.username,
to_user.tuc[0].category.name))
self.assertEqual(results, [
('charlie', 'human', 'huey', 'kitty'),
('charlie', 'human', 'zaizee', 'kitty'),
('huey', 'kitty', 'charlie', 'human'),
('zaizee', 'kitty', 'charlie', 'human'),
])
class TestAggregateRowsRegression(ModelTestCase):
requires = [
User,
Blog,
Comment,
Category,
CommentCategory,
BlogData]
def setUp(self):
super(TestAggregateRowsRegression, self).setUp()
u = User.create(username='u1')
b = Blog.create(title='b1', user=u)
BlogData.create(blog=b)
c1 = Comment.create(blog=b, comment='c1')
c2 = Comment.create(blog=b, comment='c2')
cat1 = Category.create(name='cat1')
cat2 = Category.create(name='cat2')
CommentCategory.create(category=cat1, comment=c1, sort_order=1)
CommentCategory.create(category=cat2, comment=c1, sort_order=1)
CommentCategory.create(category=cat1, comment=c2, sort_order=2)
CommentCategory.create(category=cat2, comment=c2, sort_order=2)
def test_aggregate_rows_regression(self):
comments = (Comment
.select(
Comment,
CommentCategory,
Category,
Blog,
BlogData)
.join(CommentCategory, JOIN.LEFT_OUTER)
.join(Category, JOIN.LEFT_OUTER)
.switch(Comment)
.join(Blog)
.join(BlogData, JOIN.LEFT_OUTER)
.where(Category.id == 1)
.order_by(CommentCategory.sort_order))
with self.assertQueryCount(1):
c_list = list(comments.aggregate_rows())
def test_regression_506(self):
user = User.create(username='u2')
for i in range(2):
Blog.create(title='u2-%s' % i, user=user)
users = (User
.select()
.order_by(User.id.desc())
.paginate(1, 5)
.alias('users'))
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(Blog)
.join(users, on=(User.id == users.c.id))
.order_by(User.username, Blog.title)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[blog.title for blog in user.blog_set]))
self.assertEqual(results, [
('u1', ['b1']),
('u2', ['u2-0', 'u2-1']),
])
class TestPrefetchNonPKFK(ModelTestCase):
requires = [Package, PackageItem]
data = {
'101': ['a', 'b'],
'102': ['c'],
'103': [],
'104': ['a', 'b', 'c', 'd', 'e'],
}
def setUp(self):
super(TestPrefetchNonPKFK, self).setUp()
for barcode, titles in self.data.items():
Package.create(barcode=barcode)
for title in titles:
PackageItem.create(package=barcode, title=title)
def test_prefetch(self):
packages = Package.select().order_by(Package.barcode)
items = PackageItem.select().order_by(PackageItem.id)
query = prefetch(packages, items)
for package, (barcode, titles) in zip(query, sorted(self.data.items())):
self.assertEqual(package.barcode, barcode)
self.assertEqual(
[item.title for item in package.items_prefetch],
titles)
packages = (Package
.select()
.where(Package.barcode << ['101', '104'])
.order_by(Package.id))
items = items.where(PackageItem.title << ['a', 'c', 'e'])
query = prefetch(packages, items)
accum = {}
for package in query:
accum[package.barcode] = [
item.title for item in package.items_prefetch]
self.assertEqual(accum, {
'101': ['a'],
            '104': ['a', 'c', 'e'],
})
| [
"[email protected]"
] | |
14364c67317679c69633bfef740873388273e709 | e74187c3ccb41042fe35953f081e90f671aca9a4 | /src/103_train_lgbm1.py | 4aac2c6c8e5f10f3353a47bdcdd42a5cccc70bed | [] | no_license | lockbro/KDD-Cup-2019 | 1d2bef17ecc4e164c26f4d18b5233889e559d18e | 07bc25eb5a4d9ec33aa9b90fd1ef56de6da89f9d | refs/heads/master | 2021-04-23T17:26:20.559648 | 2019-08-09T11:51:22 | 2019-08-09T11:51:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,061 | py |
import gc
import json
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import sys
import time
import warnings
from contextlib import contextmanager
from glob import glob
from sklearn.metrics import f1_score
from sklearn.model_selection import KFold, StratifiedKFold
from tqdm import tqdm
from utils import line_notify, loadpkl, eval_f, save2pkl
from utils import NUM_FOLDS, FEATS_EXCLUDED, CAT_COLS
#==============================================================================
# Training LightGBM (city 1)
#==============================================================================
warnings.filterwarnings('ignore')
@contextmanager
def timer(title):
t0 = time.time()
yield
print("{} - done in {:.0f}s".format(title, time.time() - t0))
# Display/plot feature importance
def display_importances(feature_importance_df_, outputpath, csv_outputpath):
cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False)[:40].index
best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
# for checking all importance
_feature_importance_df_=feature_importance_df_.groupby('feature').sum()
_feature_importance_df_.to_csv(csv_outputpath)
plt.figure(figsize=(8, 10))
sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False))
plt.title('LightGBM Features (avg over folds)')
plt.tight_layout()
plt.savefig(outputpath)
# LightGBM GBDT with KFold or Stratified KFold
def kfold_lightgbm(train_df, test_df, num_folds, stratified=False, debug=False):
print("Starting LightGBM. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
# Cross validation model
    if stratified:
        folds = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=326)
    else:
        folds = KFold(n_splits=num_folds, shuffle=True, random_state=326)
# Create arrays and dataframes to store results
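    # (12 columns: one predicted probability per transport mode, classes 0-11)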
oof_preds = np.zeros((train_df.shape[0],12))
sub_preds = np.zeros((test_df.shape[0],12))
feature_importance_df = pd.DataFrame()
feats = [f for f in train_df.columns if f not in FEATS_EXCLUDED]
# k-fold
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['click_mode'])):
train_x, train_y = train_df[feats].iloc[train_idx], train_df['click_mode'].iloc[train_idx]
valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['click_mode'].iloc[valid_idx]
# set data structure
lgb_train = lgb.Dataset(train_x,
label=train_y,
categorical_feature=CAT_COLS,
free_raw_data=False)
lgb_test = lgb.Dataset(valid_x,
label=valid_y,
categorical_feature=CAT_COLS,
free_raw_data=False)
# params
        params = {
            'device': 'gpu',
'task': 'train',
'boosting': 'gbdt',
'objective': 'multiclass',
'metric': 'multiclass',
'learning_rate': 0.01,
'num_class': 12,
'num_leaves': 52,
'colsample_bytree': 0.3490457769968177,
'subsample': 0.543646263362097,
'max_depth': 11,
'reg_alpha': 4.762312990232561,
'reg_lambda': 9.98131082276387,
'min_split_gain': 0.19161156850826594,
'min_child_weight': 15.042054927368088,
'min_data_in_leaf': 17,
'verbose': -1,
'seed':int(2**n_fold),
'bagging_seed':int(2**n_fold),
'drop_seed':int(2**n_fold)
}
clf = lgb.train(
params,
lgb_train,
valid_sets=[lgb_train, lgb_test],
valid_names=['train', 'test'],
# feval=eval_f,
num_boost_round=10000,
early_stopping_rounds= 200,
verbose_eval=100
)
# save model
clf.save_model('../output/lgbm_1_{}.txt'.format(n_fold))
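        # Out-of-fold predictions fill this fold's validation slice; test
        # predictions are averaged across folds.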
oof_preds[valid_idx] = clf.predict(valid_x, num_iteration=clf.best_iteration)
sub_preds += clf.predict(test_df[feats], num_iteration=clf.best_iteration) / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = np.log1p(clf.feature_importance(importance_type='gain', iteration=clf.best_iteration))
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('Fold %2d F1 Score : %.6f' % (n_fold + 1, f1_score(valid_y,np.argmax(oof_preds[valid_idx],axis=1),average='weighted')))
del clf, train_x, train_y, valid_x, valid_y
gc.collect()
# Full F1 Score & LINE Notify
full_f1 = f1_score(train_df['click_mode'], np.argmax(oof_preds,axis=1),average='weighted')
print('Full F1 Score %.6f' % full_f1)
line_notify('Full F1 Score %.6f' % full_f1)
# display importances
display_importances(feature_importance_df,
'../imp/lgbm_importances_1.png',
'../imp/feature_importance_lgbm_1.csv')
if not debug:
# save prediction for submit
test_df['recommend_mode'] = np.argmax(sub_preds, axis=1)
test_df = test_df.reset_index()
        # post processing: when only a single plan was offered, a non-zero
        # recommendation must be that plan's transport mode (use .loc to
        # avoid chained-assignment issues)
        single_plan = (test_df['plan_num_plans'] == 1) & (test_df['recommend_mode'] != 0)
        test_df.loc[single_plan, 'recommend_mode'] = test_df.loc[single_plan, 'plan_0_transport_mode']
# save csv
test_df[['sid','recommend_mode']].to_csv(submission_file_name, index=False)
# save out of fold prediction
train_df.loc[:,'recommend_mode'] = np.argmax(oof_preds, axis=1)
train_df = train_df.reset_index()
train_df[['sid','click_mode','recommend_mode']].to_csv(oof_file_name, index=False)
# save prediction for submit
sub_preds = pd.DataFrame(sub_preds)
sub_preds.columns = ['pred_lgbm_plans{}'.format(c) for c in sub_preds.columns]
sub_preds['sid'] = test_df['sid']
sub_preds['click_mode'] = test_df['click_mode']
# save out of fold prediction
oof_preds = pd.DataFrame(oof_preds)
oof_preds.columns = ['pred_lgbm_plans{}'.format(c) for c in oof_preds.columns]
oof_preds['sid'] = train_df['sid']
oof_preds['click_mode'] = train_df['click_mode']
# merge
df = oof_preds.append(sub_preds)
# save as pkl
save2pkl('../features/lgbm_pred_1.pkl', df)
line_notify('{} finished.'.format(sys.argv[0]))
def main(debug=False):
with timer("Load Datasets"):
# load feathers
files = sorted(glob('../features/feats1/*.feather'))
df = pd.concat([pd.read_feather(f) for f in tqdm(files, mininterval=60)], axis=1)
# use selected features
df = df[configs['features']]
        # set sid as index
df.set_index('sid', inplace=True)
# split train & test
train_df = df[df['click_mode'].notnull()]
test_df = df[df['click_mode'].isnull()]
del df
gc.collect()
if debug:
train_df=train_df.iloc[:1000]
with timer("Run LightGBM with kfold"):
kfold_lightgbm(train_df, test_df, num_folds=NUM_FOLDS, stratified=True, debug=debug)
if __name__ == "__main__":
submission_file_name = "../output/submission_lgbm_1.csv"
oof_file_name = "../output/oof_lgbm_1.csv"
configs = json.load(open('../configs/103_lgbm.json'))
with timer("Full model run"):
main(debug=False)
| [
"[email protected]"
] | |
1b30a149581d0b5b8db7a100fe659f0611e29ef0 | 73d150e0b7de927948f89aa604537b312c69ef4d | /books/migrations/0006_set_start_end_page.py | 3542211e8232b7d5b3b7eabc8f906c44f0d3e612 | [] | no_license | nonZero/Hagadot | f7090f1348a1dac10e095c5d7bde9700b7241680 | cd1735c1c1167c91efd8201ac84d863ada3373fc | refs/heads/master | 2021-04-25T05:49:20.213406 | 2018-04-26T12:24:12 | 2018-04-26T12:24:12 | 113,889,523 | 0 | 2 | null | 2021-03-18T20:33:32 | 2017-12-11T17:41:47 | Python | UTF-8 | Python | false | false | 566 | py | # Generated by Django 2.0.4 on 2018-04-09 21:55
from django.db import migrations
from django.db.models import F
def forwards(apps, schema_editor):
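    # Backfill page bounds on existing rows: missing start_page defaults to 1
    # and missing end_page to the book's total page count.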
Book = apps.get_model("books", "Book")
db_alias = schema_editor.connection.alias
m = Book.objects.using(db_alias)
m.filter(start_page=None).update(start_page=1)
m.filter(end_page=None).update(end_page=F('num_pages'))
class Migration(migrations.Migration):
dependencies = [
('books', '0005_auto_20180409_2147'),
]
operations = [
migrations.RunPython(forwards)
]
| [
"[email protected]"
] |