blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-616) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-115) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, ⌀ = null) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀ = null) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀ = null) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3-10.2M) | authors (sequencelengths 1-1) | author_id (stringlengths 1-132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4df9bc50ac2c592bccd0426d6011c97ff2d0b362 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /rihSbQq6x8R2D4aoa_9.py | 3e356f033f949374071534aae585fb0fbbe6eded | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,286 | py | """
As you know, the built-in `range()` returns a range of numbers, but it doesn't
work on alphabetic characters. In this challenge, we try to fill this gap.
Write a function `alpha_range()` which takes three arguments: `start`, `stop`,
and `step` (whose default value is 1). The function must return a list of
alphabetical characters, ranging from the start character to the stop
character according to the `step` value.
The function must follow these conditions:
* If `step` is zero, or is not strictly between -26 and 26, return `"step must be a non-zero value between -26 and 26, exclusive"`.
* Both `start` and `stop` must share the same case; otherwise, return `"both start and stop must share the same case"`.
Like the `range()` function:
* `step` must not be zero.
Unlike the `range()` function:
* the returned list must be inclusive.
* the order of the two arguments doesn't affect the output (i.e. the output of `alpha_range("a", "f")` is the same as `alpha_range("f", "a")`, see examples).
### Examples
alpha_range("a", "f") ➞ ["a", "b", "c", "d", "e", "f"]
alpha_range("f", "a") ➞ ["a", "b", "c", "d", "e", "f"]
alpha_range("a", "f", -1) ➞ ["f", "e", "d", "c", "b", "a"]
alpha_range("f", "a", -1) ➞ ["f", "e", "d", "c", "b", "a"]
alpha_range("A", "F", -1) ➞ ["F", "E", "D", "C", "B", "A"]
alpha_range("A", "F", 0) ➞ "step must be a non-zero value between -26 and 26, exclusive"
alpha_range("A", "F", -26) ➞ "step must be a non-zero value between -26 and 26, exclusive"
alpha_range("a", "F", -1) ➞ "both start and stop must share the same case"
### Notes
All the `start` and `stop` values in the tests are valid alphabetical
characters.
"""
def alpha_range(start, stop, step=1):
    # The step=-26 example above must error out, so -26 and 26 themselves are
    # invalid ("exclusive" bounds); a plain `step < -26 or step > 26` check
    # would let them through.
    if step == 0 or step <= -26 or step >= 26:
        return "step must be a non-zero value between -26 and 26, exclusive"
    if start.islower() != stop.islower():
        return "both start and stop must share the same case"
    if step > 0:
        lo, hi = min(ord(start), ord(stop)), max(ord(start), ord(stop))
        return [chr(i) for i in range(lo, hi + 1, step)]
    else:
        hi, lo = max(ord(start), ord(stop)), min(ord(start), ord(stop))
        return [chr(i) for i in range(hi, lo - 1, step)]
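# Quick self-check against the documented examples (hedged: plain asserts,
# run only when this file is executed directly):
if __name__ == "__main__":
    assert alpha_range("a", "f") == ["a", "b", "c", "d", "e", "f"]
    assert alpha_range("f", "a", -1) == ["f", "e", "d", "c", "b", "a"]
    assert alpha_range("A", "F", -26) == (
        "step must be a non-zero value between -26 and 26, exclusive")
    assert alpha_range("a", "F", -1) == "both start and stop must share the same case"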
| [
"[email protected]"
] | |
810b973f3daffeae7e9ec96715c0a41d5c404fb7 | c85e68eda2058433d9b43256a121dcc3190af38f | /npx/command/plot.py | a4a0fb901c734b18bc3f10792496253b8cc489ff | [] | no_license | nickstenning/npx | f87686d11b6612af373ada850878856b0bab36a3 | 385816c7725b6ce196fc2ddff369d199985683cb | refs/heads/master | 2023-06-22T11:13:53.405289 | 2011-11-30T01:07:22 | 2011-11-30T01:07:22 | 2,723,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | import os
import sys
from matplotlib import pyplot as plt
def main():
data = [float(line) for line in sys.stdin]
plt.plot(data)
if os.isatty(sys.stdout.fileno()):
plt.show()
else:
plt.savefig(sys.stdout, format='png')
if __name__ == '__main__':
main()
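# Hedged usage sketch: feed one number per line on stdin.
#   $ printf '1\n4\n9\n16\n' | python plot.py > curve.png   # stdout redirected -> PNG bytes
#   $ printf '1\n4\n9\n16\n' | python plot.py               # stdout is a tty -> interactive window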
| [
"[email protected]"
] | |
3893842b288a7438c1453710f3e9b12b003c6a7a | 5c5fdbda26a8a066e2023c73d5a4188a37223c2d | /validator/wsgi.py | 17da9409496b4f2266e66efb74e831077241167e | [] | no_license | Vivekyadv/SE-validator | abb806d6ac8217a3846476106d4ba27de98e9d2d | c11671cc9ecc3538a8fba522f9b6b350dce966bb | refs/heads/master | 2023-04-25T15:17:10.587731 | 2021-05-19T10:27:12 | 2021-05-19T10:27:12 | 358,607,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for validator project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'validator.settings')
application = get_wsgi_application()
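# Hedged example: the callable above can be served by any WSGI server, e.g.
#   gunicorn validator.wsgi:application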
| [
"[email protected]"
] | |
20f4ecd5a05204c7e126e3e65438821d69d637de | 97f0fe78c0a2c440d96fe920a21b2e07c9e639bf | /examples/drawing/knuth_miles.py | 3c691e5917d5d769af732a776d42c89db35ad3c0 | [
"BSD-3-Clause"
] | permissive | drewconway/networkx | 09702b860a61b6717152610e05935a000988d1d5 | 7469e2fd808bac393e55cd6769256655fe26ba16 | refs/heads/master | 2020-12-25T14:23:12.169674 | 2011-07-26T19:59:55 | 2011-07-26T19:59:55 | 2,037,974 | 14 | 3 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | ../graph/knuth_miles.py | [
"none@none"
] | none@none |
68c5f217b3f515a70496a04ae87399d87f2d2d50 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/googlecloudsdk/api_lib/container/binauthz/kms.py | dd5eb041a8db3904921f0db96705cb7e2d0eee0c | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 3,213 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for interacting with the cloudkms API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.kms import get_digest
from googlecloudsdk.command_lib.kms import maps
import six
API_NAME = 'cloudkms'
V1 = 'v1'
DEFAULT_VERSION = V1
class Client(object):
"""A client to access cloudkms for binauthz purposes."""
def __init__(self, api_version=None):
"""Creates a Cloud KMS client.
Args:
api_version: If provided, the cloudkms API version to use.
"""
if api_version is None:
api_version = DEFAULT_VERSION
self.client = apis.GetClientInstance(API_NAME, api_version)
self.messages = apis.GetMessagesModule(API_NAME, api_version)
def GetPublicKey(self, key_ref):
"""Retrieves the public key for given CryptoKeyVersion."""
req = self.messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyRequest(
name=key_ref)
return (
self.client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions.
GetPublicKey(req))
def AsymmetricSign(self, key_ref, digest_algorithm, plaintext):
"""Sign a string payload with an asymmetric KMS CryptoKeyVersion.
Args:
key_ref: The CryptoKeyVersion relative resource name to sign with.
digest_algorithm: The name of the digest algorithm to use in the signing
operation. May be one of 'sha256', 'sha384', 'sha512'.
plaintext: The plaintext bytes to sign.
Returns:
An AsymmetricSignResponse.
"""
digest = get_digest.GetDigestOfFile(
digest_algorithm, six.BytesIO(plaintext))
req = self.messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignRequest(
name=key_ref,
asymmetricSignRequest=self.messages.AsymmetricSignRequest(
digest=digest))
return (
self.client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions.
AsymmetricSign(req))
def GetKeyUri(key_ref):
"""Returns the URI used as the default for KMS keys.
This should look something like '//cloudkms.googleapis.com/v1/...'
Args:
key_ref: A CryptoKeyVersion Resource.
Returns:
The string URI.
"""
return key_ref.SelfLink().split(':', 1)[1]
def GetAlgorithmDigestType(key_algorithm):
"""Returns the digest name associated with the given CryptoKey Algorithm."""
for digest_name in maps.DIGESTS:
if digest_name in key_algorithm.name.lower():
return digest_name
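# Hedged usage sketch (kept as comments; the key path below is a made-up
# example resource name, and `signature` is the assumed response field of
# the Cloud KMS AsymmetricSignResponse):
#   client = Client()
#   key_ref = ('projects/my-proj/locations/global/keyRings/my-ring/'
#              'cryptoKeys/my-key/cryptoKeyVersions/1')
#   response = client.AsymmetricSign(key_ref, 'sha256', b'payload-to-sign')
#   signature = response.signature
# GetKeyUri takes a parsed resource reference and yields a URI of the form
# '//cloudkms.googleapis.com/v1/projects/...'.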
| [
"[email protected]"
] | |
9173692ea365b340a64f0d8af8f685daf1708995 | b0d763b2eace81e82eb3405fba13f2da04495f34 | /alshamelah_api/apps/users/migrations/0022_auto_20200713_0050.py | ca98b7fbb84ab28cb6e208b7cca1f6bcbf7b8500 | [
"MIT"
] | permissive | devna-dev/durar-backend | 7b57fe93e2687a64168ac75758b436109394bd9c | 36ea29bafd4cb95098e4057eb71df211dc923008 | refs/heads/master | 2022-12-15T08:25:30.079110 | 2020-07-29T11:50:19 | 2020-07-29T11:50:19 | 295,212,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | # Generated by Django 3.0.8 on 2020-07-13 00:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0021_auto_20200703_0223'),
]
operations = [
migrations.CreateModel(
name='PasswordOTP',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('users.otp',),
),
migrations.AlterField(
model_name='otp',
name='type',
            field=models.CharField(choices=[('E', 'Email'), ('P', 'Phone'), ('PW', 'Password')], max_length=2, verbose_name='Type'),
),
]
| [
"[email protected]"
] | |
97258bed67c0ef562954fa2a7300ced997d0377e | 2dd4f43f5d519f5533b65ff9f844eb3fe2d57fb6 | /FusionIIIT/applications/office_module/views.py | 8350173e8671a501a040d3af489a78d7de0067e7 | [] | no_license | sumitkumar15061977/FusionIIIT | 840c5b736eb7d4f221ee16cb5cdb246fa930a49c | 67cac264c34f5fe1a10a766eef573cf840def84c | refs/heads/master | 2021-09-09T08:21:25.167091 | 2018-03-14T10:58:52 | 2018-03-14T10:58:52 | 125,206,505 | 1 | 0 | null | 2018-03-14T12:06:44 | 2018-03-14T12:06:43 | null | UTF-8 | Python | false | false | 749 | py | from django.shortcuts import render
def officeOfDeanStudents(request):
context = {}
return render(request, "officeModule/officeOfDeanStudents/officeOfDeanStudents.html", context)
def officeOfPurchaseOfficer(request):
    context = {}
    return render(request, "officeModule/officeOfPurchaseOfficer/officeOfPurchaseOfficer.html", context)
def officeOfRegistrar(request):
context = {}
return render(request, "officeModule/officeOfRegistrar/officeOfRegistrar.html", context)
def officeOfDeanRSPC(request):
context = {}
return render(request, "officeModule/officeOfDeanRSPC/officeOfDeanRSPC.html", context)
def genericModule(request):
context = {}
return render(request, "officeModule/genericModule/genericModule.html", context)
| [
"[email protected]"
] | |
3fd73e4197c65c49fbfae88fa986693566e107bf | 9e2f24027e4044252639563461116a895acce039 | /biosteam/units/facilities/_cleaning_in_place.py | c878effb78118dc3773aec8f7b0f67780f21eac1 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"NCSA"
] | permissive | yalinli2/biosteam | 5010b5d430cc746f6fa00a23805a1c1f5cac7a81 | e7385ca1feac642881a357ffbc4461382549c3a4 | refs/heads/master | 2022-03-20T23:57:06.824292 | 2022-02-22T15:55:11 | 2022-02-22T15:55:11 | 190,422,353 | 0 | 0 | MIT | 2019-06-05T15:39:04 | 2019-06-05T15:39:03 | null | UTF-8 | Python | false | false | 635 | py | # -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, Yoel Cortes-Pena <[email protected]>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
from . import Facility
from ..decorators import cost
__all__ = ('CIPpackage',)
# %%
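# Hedged note on the @cost decorator below (assumed scaling form, in the
# style of the standard six-tenths rule): purchase cost is scaled from the
# 63 kg/hr basis roughly as cost * (S_actual / 63) ** 0.6 * (CE_current / 522),
# with BM=1.8 as the bare-module (installation) factor. For example, doubling
# the basis flow multiplies 421e3 by 2 ** 0.6, about 1.52, before the CE
# index correction.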
@cost('Flow rate', units='kg/hr',
S=63, cost=421e3, CE=522, BM=1.8, n=0.6)
class CIPpackage(Facility):
ticket_name = 'CIP'
line = 'CIP Package'
network_priority = 0
_N_ins = 1
_N_outs = 1
| [
"[email protected]"
] | |
e6fab0f4066de7f9c522035dbe66caa6aaa0bb4d | 489a45659476fafb66934427e42bfce3d60a0116 | /Assets/Python/Smeagolheart/StarSigns.py | 888b3d9cc9c20597e8dbe168ab8c0669687215cf | [] | no_license | billw2012/Caveman2Cosmos | 3a8c6ea347e75dbe2de9519fe70e6b38e0cf6dbe | 2382877536e1669972dd024ce2d0f3d0d5ffd988 | refs/heads/master | 2020-07-19T00:14:48.856106 | 2019-09-03T23:20:42 | 2019-09-03T23:21:02 | 197,989,388 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Star Signs
from CvPythonExtensions import *
import CvUtil
def give(GC, TRNSLTR, GAME, CyUnit, CyPlayer, CyCity = None, iPlayer = None):
aStarSignList = (
"PROMOTION_AQUARIUS",
"PROMOTION_ARIES",
"PROMOTION_CANCER",
"PROMOTION_CAPRICORN",
"PROMOTION_GEMINI",
"PROMOTION_LEO",
"PROMOTION_LIBRA",
"PROMOTION_PISCES",
"PROMOTION_SAGITTARIUS",
"PROMOTION_SCORPIO",
"PROMOTION_TAURUS",
"PROMOTION_VIRGO"
)
iChance = GAME.getSorenRandNum(12, "Star Signs") # integer 0-11
iPromotion = GC.getInfoTypeForString(aStarSignList[iChance])
CyUnit.setHasPromotion(iPromotion, True)
if CyPlayer.isHuman():
if CyCity:
szTxt = TRNSLTR.getText("TXT_KEY_MESSAGE_STARSIGN_BUILD", (CyCity.getName(),))
else:
szTxt = TRNSLTR.getText("TXT_KEY_MESSAGE_STARSIGN_CREATE", ())
iPlayer = CyUnit.getOwner()
szIcon = GC.getPromotionInfo(iPromotion).getButton()
CvUtil.sendMessage(szTxt, iPlayer, 16, szIcon, ColorTypes(44), CyUnit.getX(), CyUnit.getY(), True, True) | [
"[email protected]"
] | |
897cf237b24d935398d13bf5539afb7950f6e027 | 8bd6b0784de9a1e6a39d0f5f23f2d8fb50c73d49 | /MethodRefine-Abs/blocks-world/MethodRefine/blockworld_benchmark-mid/validating/validating_17.py | 0a88642e5064667fcf0c11e786072baad5f28a04 | [] | no_license | sysulic/MethodRefine | a483d74e65337dff4bc2539ce3caa3bf83748b48 | adbb22d4663041d853d3132f75032b7561bf605c | refs/heads/master | 2020-09-14T10:45:55.948174 | 2020-05-01T09:13:59 | 2020-05-01T09:13:59 | 223,104,986 | 3 | 2 | null | 2020-04-27T11:01:36 | 2019-11-21T06:33:16 | Python | UTF-8 | Python | false | false | 1,276 | py | #!/usr/bin/env python
# coding=utf-8
import sys
sys.path.insert(0, './')
from blockworld import *
import new_tihtn_planner
state0 = new_tihtn_planner.State('state0')
allow = False
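# Hedged reading of the state encoding below: 'on' and 'down' encode the
# stacking relation in opposite directions (False meaning no neighboring
# block), 'clear' marks blocks with nothing on top, and 'on_table' marks
# blocks resting directly on the table.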
state0.on = {'block-2':'block-5','block-3':'block-2','block-5':False,'block-1':False,'block-4':False,}
state0.down = {'block-2':'block-3','block-3':False,'block-5':'block-2','block-1':False,'block-4':False,}
state0.clear = {'block-2':False,'block-3':False,'block-5':True,'block-1':True,'block-4':True,}
state0.on_table = {'block-2':False,'block-3':True,'block-5':False,'block-1':True,'block-4':True,}
state0.holding = False
new_tihtn_planner.declare_types({'block':['block-1','block-2','block-3','block-4','block-5',],'nothing':[()]})
new_tihtn_planner.declare_funs({pick_up:['block'],put_down:['block'],stack:['block', 'block'],checkpile1:['nothing'],checkpile2:['nothing'],checkpile3:['nothing'],checkpile4:['nothing']})
new_tihtn_planner.instance()
def execute(completable):
return new_tihtn_planner.pyhop(completable, allow, state0,[('tower5','block-1','block-2', 'block-3', 'block-4', 'block-5')], [],9)
def add_methods(fun_obj_list):
for fun in fun_obj_list:
new_tihtn_planner.add_method(fun.func_name.split('__')[0], fun)
def reverse_methods():
new_tihtn_planner.reverse_methods() | [
"[email protected]"
] | |
55548e1bdf4fe09c27ce7c0c0ad3ff691558058c | 9b0bdebe81e558d3851609687e4ccd70ad026c7f | /算法思想/数学/03.判断质数.py | 77655309944993d93033d7fbcc954a4636e287ec | [] | no_license | lizenghui1121/DS_algorithms | 645cdad007ccbbfa82cc5ca9e3fc7f543644ab21 | 9690efcfe70663670691de02962fb534161bfc8d | refs/heads/master | 2022-12-13T22:45:23.108838 | 2020-09-07T13:40:17 | 2020-09-07T13:40:17 | 275,062,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | """
@Author: Li Zenghui
@Date: 2020-08-08 16:39
"""
def is_prime(a):
    if a < 2:
        return False
    # Trial division only needs candidates up to sqrt(a): if a = b * c with
    # b <= c, then b * b <= a, so any composite has a factor <= sqrt(a).
    for i in range(2, int(a ** 0.5) + 1):
        if a % i == 0:
            return False
    return True
print(is_prime(17))
print(is_prime(19))
print(is_prime(21))
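# Quick hedged demo: list all primes below 30 using the helper above.
print([n for n in range(30) if is_prime(n)])  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]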
| [
"[email protected]"
] | |
68096293210ed5bcaffcdbc2a75c9b67bd4cea6c | 57f733a3b470505e582528042cd37cb87eb5f03f | /probabilistic_programming/chapter1_4_text_msg.py | 4d612268f91df622674e0b1baf626dd797c126a6 | [] | no_license | auroua/test | 23246df57fc8644f0c2fd480d0f8c69e9b01a295 | 8bf601c886db42e0abe4f86fbcf33adef21a6470 | refs/heads/master | 2016-08-12T02:55:49.118185 | 2015-12-11T05:19:27 | 2015-12-11T05:19:27 | 44,439,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | __author__ = 'auroua'
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
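# Assumes txtdata.csv (one message count per line, one line per day) sits in
# the working directory.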
count_data = np.loadtxt("txtdata.csv")
n_count_data = len(count_data)
plt.bar(np.arange(n_count_data), count_data, color="#348ABD")
plt.xlabel("Time (days)")
plt.ylabel("count of text-msgs received")
plt.title("Did the user's texting habits change over time?")
plt.xlim(0, n_count_data)
plt.show() | [
"[email protected]"
] | |
61304fc69048fd47ea1dede9b57c3795c6fe7a29 | 7d90d2ce27c6ee0af74391b09909edbd45fdc2f0 | /renix_py_api/api_gen/StartStreamArpCommand_Autogen.py | ff74cef8ed9094594b147ae8c531f391544f731c | [] | no_license | gaoxingyu-hub/54testframework-master-e284 | d7ea0d4a715b65c8652430e963a86b9522a7237a | 57dd2197e7d91b8ad8fb2bd0e3503f10afa08544 | refs/heads/master | 2023-04-30T05:50:41.542402 | 2021-05-28T09:19:37 | 2021-05-28T09:19:37 | 309,922,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | """
Auto-generated File
Create Time: 2019-12-27 02:33:25
"""
from .ROMEnum_Autogen import *
from renix_py_api.renix_common_api import *
from renix_py_api import rom_manager
from .ROMCommand_Autogen import ROMCommand
@rom_manager.rom
class StartStreamArpCommand(ROMCommand):
def __init__(self, StreamHandles=None, **kwargs):
self._StreamHandles = StreamHandles # Stream Handles
properties = kwargs.copy()
if StreamHandles is not None:
properties['StreamHandles'] = StreamHandles
# call base class function, and it will send message to renix server to create a class.
super(StartStreamArpCommand, self).__init__(**properties)
@property
def StreamHandles(self):
"""
get the value of property _StreamHandles
"""
return self._StreamHandles
@StreamHandles.setter
def StreamHandles(self, value):
self._StreamHandles = value
def _set_streamhandles_with_str(self, value):
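        # Hedged example: accepts either a Tcl-style brace list such as
        # '{h1 h2 h3}' or a plain space-separated string; both end up as
        # ['h1', 'h2', 'h3'].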
tmp_value = value.strip()
if tmp_value.startswith('{'):
tmp_value = tmp_value[1:-1]
self._StreamHandles = tmp_value.split()
| [
"[email protected]"
] | |
0f0f7133bbb4c192676318d7b0272d8169f50a7f | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/opflex/ovshppaghist1h.py | dfd53ebbdac4873efb0cdfa342ac43ccad09b0b9 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,935 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class OvsHppAgHist1h(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.opflex.OvsHppAgHist1h", "ovs host protection stats")
counter = CounterMeta("txBytes", CounterCategory.COUNTER, "bytes", "transmitted bytes")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "txBytesCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "txBytesPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "txBytesSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "txBytesThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "txBytesTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "txBytesRate"
meta._counters.append(counter)
counter = CounterMeta("rxBytes", CounterCategory.COUNTER, "bytes", "received bytes")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "rxBytesCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "rxBytesPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "rxBytesSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "rxBytesThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "rxBytesTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "rxBytesRate"
meta._counters.append(counter)
counter = CounterMeta("txPkts", CounterCategory.COUNTER, "packets", "transmitted packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "txPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "txPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "txPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "txPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "txPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "txPktsRate"
meta._counters.append(counter)
counter = CounterMeta("rxPkts", CounterCategory.COUNTER, "packets", "received packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "rxPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "rxPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "rxPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "rxPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "rxPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "rxPktsRate"
meta._counters.append(counter)
meta.moClassName = "opflexOvsHppAgHist1h"
meta.rnFormat = "HDopflexOvsHppAg1h-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical aggregated ovs host protection stats stats in 1 hour"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.hostprot.Subj")
meta.parentClasses.add("cobra.model.hostprot.Pol")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.opflex.OvsHppAgHist")
meta.rnPrefixes = [
('HDopflexOvsHppAg1h-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 34786, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "rxBytesCum", "rxBytesCum", 35203, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "received bytes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("rxBytesCum", prop)
prop = PropMeta("str", "rxBytesPer", "rxBytesPer", 35204, PropCategory.IMPLICIT_PERIODIC)
prop.label = "received bytes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("rxBytesPer", prop)
prop = PropMeta("str", "rxBytesRate", "rxBytesRate", 35208, PropCategory.IMPLICIT_RATE)
prop.label = "received bytes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("rxBytesRate", prop)
prop = PropMeta("str", "rxBytesSpct", "rxBytesSpct", 35205, PropCategory.IMPLICIT_SUSPECT)
prop.label = "received bytes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("rxBytesSpct", prop)
prop = PropMeta("str", "rxBytesThr", "rxBytesThr", 35206, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "received bytes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("rxBytesThr", prop)
prop = PropMeta("str", "rxBytesTr", "rxBytesTr", 35207, PropCategory.IMPLICIT_TREND)
prop.label = "received bytes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("rxBytesTr", prop)
prop = PropMeta("str", "rxPktsCum", "rxPktsCum", 35258, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "received packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("rxPktsCum", prop)
prop = PropMeta("str", "rxPktsPer", "rxPktsPer", 35259, PropCategory.IMPLICIT_PERIODIC)
prop.label = "received packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("rxPktsPer", prop)
prop = PropMeta("str", "rxPktsRate", "rxPktsRate", 35263, PropCategory.IMPLICIT_RATE)
prop.label = "received packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("rxPktsRate", prop)
prop = PropMeta("str", "rxPktsSpct", "rxPktsSpct", 35260, PropCategory.IMPLICIT_SUSPECT)
prop.label = "received packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("rxPktsSpct", prop)
prop = PropMeta("str", "rxPktsThr", "rxPktsThr", 35261, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "received packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("rxPktsThr", prop)
prop = PropMeta("str", "rxPktsTr", "rxPktsTr", 35262, PropCategory.IMPLICIT_TREND)
prop.label = "received packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("rxPktsTr", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "txBytesCum", "txBytesCum", 35313, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "transmitted bytes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("txBytesCum", prop)
prop = PropMeta("str", "txBytesPer", "txBytesPer", 35314, PropCategory.IMPLICIT_PERIODIC)
prop.label = "transmitted bytes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("txBytesPer", prop)
prop = PropMeta("str", "txBytesRate", "txBytesRate", 35318, PropCategory.IMPLICIT_RATE)
prop.label = "transmitted bytes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("txBytesRate", prop)
prop = PropMeta("str", "txBytesSpct", "txBytesSpct", 35315, PropCategory.IMPLICIT_SUSPECT)
prop.label = "transmitted bytes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("txBytesSpct", prop)
prop = PropMeta("str", "txBytesThr", "txBytesThr", 35316, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "transmitted bytes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("txBytesThr", prop)
prop = PropMeta("str", "txBytesTr", "txBytesTr", 35317, PropCategory.IMPLICIT_TREND)
prop.label = "transmitted bytes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("txBytesTr", prop)
prop = PropMeta("str", "txPktsCum", "txPktsCum", 35368, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "transmitted packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("txPktsCum", prop)
prop = PropMeta("str", "txPktsPer", "txPktsPer", 35369, PropCategory.IMPLICIT_PERIODIC)
prop.label = "transmitted packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("txPktsPer", prop)
prop = PropMeta("str", "txPktsRate", "txPktsRate", 35373, PropCategory.IMPLICIT_RATE)
prop.label = "transmitted packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("txPktsRate", prop)
prop = PropMeta("str", "txPktsSpct", "txPktsSpct", 35370, PropCategory.IMPLICIT_SUSPECT)
prop.label = "transmitted packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("txPktsSpct", prop)
prop = PropMeta("str", "txPktsThr", "txPktsThr", 35371, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "transmitted packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("txPktsThr", prop)
prop = PropMeta("str", "txPktsTr", "txPktsTr", 35372, PropCategory.IMPLICIT_TREND)
prop.label = "transmitted packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("txPktsTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToHcloudIgw", "Tenant to IGW", "cobra.model.hcloud.SecurityGroup"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToHcloudSecurityGroup", "Tenant to Security Group", "cobra.model.hcloud.SecurityGroup"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToVzCPIf", "Tenant to vzCPIf", "cobra.model.vz.CPIf"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToVzFilter", "From fvTenant to vzFilter", "cobra.model.vz.Filter"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToVnsAbsGraph", "From fvTenant to vnsAbsGraph", "cobra.model.vns.AbsGraph"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToCloudLB", "From fvTenant to cloudLB", "cobra.model.cloud.LB"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToCloudZone", "From fvTenant to cloudZone", "cobra.model.cloud.Zone"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToCloudCtxProfile", "Tenant to cloudCtxProfile", "cobra.model.cloud.CtxProfile"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToVzBrCP", "Tenant to vzBrCP", "cobra.model.vz.BrCP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToHcloudCsr", "Tenant to hcloudCsr", "cobra.model.hcloud.Csr"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToCloudExtEPg", "fv:Tenant to cloud:ExtEPg", "cobra.model.cloud.ExtEPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToCloudRegion", "From fvTenant to cloudRegion", "cobra.model.cloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToHcloudRegion", "Tenant to hcloudRegion", "cobra.model.hcloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvTenantToFvCtx", "fvTenant to fvCtx", "cobra.model.fv.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToHcloudCtx", "Tenant to Hcloud context", "cobra.model.hcloud.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToHCloudEndPoint", "Tenant to hcloudEndPoint", "cobra.model.hcloud.EndPoint"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToCloudApp", "Tenant to Application profile", "cobra.model.cloud.App"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("TenantToCloudEPg", "Tenant to cloud EPg", "cobra.model.cloud.EPg"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
92d9ebe5980d4ffdc0a6b8bd7097185bf06741ad | ebf7427c8605d8654c67e3386b8adb2bd7503b44 | /LeetCode Pattern/8. LinkedList/237_easy_delete_node_in_a_linked_list.py | 180eb686a740d132ad848e352f5d77c15b8f1bac | [] | no_license | ryoman81/Leetcode-challenge | 78e5bc4800a440052f8515c75829e669484fed40 | fac3a49c49d2f62eafffb201a9d9cfac988ad30a | refs/heads/master | 2023-09-04T05:21:54.569459 | 2021-10-26T14:14:08 | 2021-10-26T14:14:08 | 291,615,959 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | '''
Write a function to delete a node in a singly-linked list.
You will not be given access to the head of the list, instead you will be given access to the node to be deleted directly.
It is guaranteed that the node to be deleted is not a tail node in the list.
Example 1:
Input: head = [4,5,1,9], node = 5
Output: [4,1,9]
Explanation: You are given the second node with value 5, the linked list should become 4 -> 1 -> 9 after calling your function.
Constraints:
The number of the nodes in the given list is in the range [2, 1000].
-1000 <= Node.val <= 1000
The value of each node in the list is unique.
The node to be deleted is in the list and is not a tail node
'''
class Solution:
'''
MY CODE VERSION
Thought:
        The interesting constraint is that we have NO ACCESS to the node before
        the target, so we cannot unlink it the usual way. Instead:
        - copy the next node's value into the current node
        - then make the current node point to the node after next
        WHAT AN INSANE QUESTION!!!! It shows how important it is to break out of fixed thinking patterns.
Complexity:
Time: O(1)
Space: O(1)
'''
def deleteNode(self, node):
node.val = node.next.val
node.next = node.next.next
return
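# Minimal local demo (hedged: LeetCode normally supplies ListNode and the
# harness; both are defined here only for illustration):
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

if __name__ == "__main__":
    head = ListNode(4, ListNode(5, ListNode(1, ListNode(9))))
    Solution().deleteNode(head.next)   # delete the node holding 5
    values = []
    node = head
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # [4, 1, 9]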
## On LeetCode itself no extra testing code is needed; the judge calls deleteNode directly. | [
"[email protected]"
] | |
b08b1c91edffb24ecff9cb2287e3d35255dd7fcb | 4656c9b22bee48b4156eb3524bab3215a1993d83 | packages/gui/__init__.py | 1999100759b2efb9cb603a4fdc0524162e60863b | [] | no_license | mikebourbeauart/tempaam | 0bc9215de0d967788b3c65b481a5fd3c7153dddc | c2582b5cc1fc45042c5b435f703786d7c04a51a2 | refs/heads/master | 2021-03-27T10:35:43.378899 | 2018-09-06T04:46:18 | 2018-09-06T04:46:18 | 120,359,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from .publish import PubTab
from .main_window import MainAAM
from .folders_widget import FoldersWidget
from .assets_widget import AssetsWidget
from .selection_tab import SelTabWidget
from .options_tab import OptionsTabWidget
| [
"[email protected]"
] | |
3e54d34efb3d978167a45f797aa25c0d34098665 | 238e46a903cf7fac4f83fa8681094bf3c417d22d | /VTK/vtk_7.1.1_x64_Debug/lib/python2.7/site-packages/vtk/tk/__init__.py | 19d7f3c0b6596ecf6b8b6844c8c2c89d0d7ea161 | [
"BSD-3-Clause"
] | permissive | baojunli/FastCAE | da1277f90e584084d461590a3699b941d8c4030b | a3f99f6402da564df87fcef30674ce5f44379962 | refs/heads/master | 2023-02-25T20:25:31.815729 | 2021-02-01T03:17:33 | 2021-02-01T03:17:33 | 268,390,180 | 1 | 0 | BSD-3-Clause | 2020-06-01T00:39:31 | 2020-06-01T00:39:31 | null | UTF-8 | Python | false | false | 155 | py | """Tkinter widgets for VTK."""
__all__ = ['vtkTkRenderWidget', 'vtkTkImageViewerWidget',
'vtkTkRenderWindowInteractor', 'vtkTkPhotoImage']
| [
"l”[email protected]“"
] | |
9226ed8ba885ea45ad7f7bb2eb7bc69649bcb9de | 8a92f5860a44c8ca6816af18295f26d3b364a25f | /tests/cli/test_nuttercli.py | e9367a5e8c9afd2f412e418b32ff14d76ee3941f | [
"MIT"
] | permissive | microsoft/nutter | b86dd58acbac02a25f2fc6e590e28073202862c2 | 368248bb3c2ed88a60ba6f5953b89fcc2cd0364e | refs/heads/master | 2023-06-29T23:59:12.723142 | 2022-12-16T16:30:53 | 2022-12-16T16:30:53 | 219,394,533 | 225 | 36 | MIT | 2023-09-14T10:57:09 | 2019-11-04T01:43:34 | Python | UTF-8 | Python | false | false | 8,129 | py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""
import pytest
import os
import json
import cli.nuttercli as nuttercli
from cli.nuttercli import NutterCLI
from common.apiclientresults import ExecuteNotebookResult
import mock
from common.testresult import TestResults, TestResult
from cli.reportsman import ReportWriterManager, ReportWritersTypes, ReportWriters
def test__get_cli_version__without_build__env_var__returns_value():
version = nuttercli.get_cli_version()
assert version is not None
def test__get_cli_header_value():
version = nuttercli.get_cli_version()
header = 'Nutter Version {}\n'.format(version)
header += '+' * 50
header += '\n'
assert nuttercli.get_cli_header() == header
def test__get_cli_version__with_build__env_var__returns_value(mocker):
version = nuttercli.get_cli_version()
build_number = '1.2.3'
mocker.patch.dict(
os.environ, {nuttercli.BUILD_NUMBER_ENV_VAR: build_number})
version_with_build_number = nuttercli.get_cli_version()
assert version_with_build_number == '{}.{}'.format(version, build_number)
def test__get_version_label__valid_string(mocker):
mocker.patch.dict(os.environ, {'DATABRICKS_HOST': 'myhost'})
mocker.patch.dict(os.environ, {'DATABRICKS_TOKEN': 'mytoken'})
version = nuttercli.get_cli_version()
expected = 'Nutter Version {}'.format(version)
cli = NutterCLI()
version_from_cli = cli._get_version_label()
assert expected == version_from_cli
def test__nutter_cli_ctor__handles__version_and_exits_0(mocker):
mocker.patch.dict(os.environ, {'DATABRICKS_HOST': 'myhost'})
mocker.patch.dict(os.environ, {'DATABRICKS_TOKEN': 'mytoken'})
with pytest.raises(SystemExit) as mock_ex:
cli = NutterCLI(version=True)
assert mock_ex.type == SystemExit
assert mock_ex.value.code == 0
def test__run__pattern__display_results(mocker):
test_results = TestResults().serialize()
cli = _get_cli_for_tests(
mocker, 'SUCCESS', 'TERMINATED', test_results)
mocker.patch.object(cli, '_display_test_results')
cli.run('my*', 'cluster')
assert cli._display_test_results.call_count == 1
def test__nutter_cli_ctor__handles__configurationexception_and_exits_1(mocker):
mocker.patch.dict(os.environ, {'DATABRICKS_HOST': ''})
mocker.patch.dict(os.environ, {'DATABRICKS_TOKEN': ''})
with pytest.raises(SystemExit) as mock_ex:
cli = NutterCLI()
assert mock_ex.type == SystemExit
assert mock_ex.value.code == 1
def test__run__one_test_fullpath__display_results(mocker):
test_results = TestResults().serialize()
cli = _get_cli_for_tests(
mocker, 'SUCCESS', 'TERMINATED', test_results)
mocker.patch.object(cli, '_display_test_results')
cli.run('test_mynotebook2', 'cluster')
assert cli._display_test_results.call_count == 1
def test__run_one_test_junit_writer__writer_writes(mocker):
test_results = TestResults().serialize()
cli = _get_cli_for_tests(
mocker, 'SUCCESS', 'TERMINATED', test_results)
mocker.patch.object(cli, '_get_report_writer_manager')
mock_report_manager = ReportWriterManager(ReportWriters.JUNIT)
mocker.patch.object(mock_report_manager, 'write')
mocker.patch.object(mock_report_manager, 'add_result')
cli._get_report_writer_manager.return_value = mock_report_manager
cli.run('test_mynotebook2', 'cluster')
assert mock_report_manager.add_result.call_count == 1
assert mock_report_manager.write.call_count == 1
assert not mock_report_manager._providers[ReportWritersTypes.JUNIT].has_data(
)
def test__list__none__display_result(mocker):
cli = _get_cli_for_tests(
mocker, 'SUCCESS', 'TERMINATED', 'IHAVERETURNED')
mocker.patch.object(cli, '_display_list_results')
cli.list('/')
assert cli._display_list_results.call_count == 1
def _get_cli_for_tests(mocker, result_state, life_cycle_state, notebook_result):
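    """Build a NutterCLI whose nutter run/list calls are mocked to return canned responses."""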
mocker.patch.dict(os.environ, {'DATABRICKS_HOST': 'myhost'})
mocker.patch.dict(os.environ, {'DATABRICKS_TOKEN': 'mytoken'})
cli = NutterCLI()
mocker.patch.object(cli._nutter, 'run_test')
cli._nutter.run_test.return_value = _get_run_test_response(
result_state, life_cycle_state, notebook_result)
mocker.patch.object(cli._nutter, 'run_tests')
cli._nutter.run_tests.return_value = _get_run_tests_response(
result_state, life_cycle_state, notebook_result)
mocker.patch.object(cli._nutter, 'list_tests')
cli._nutter.list_tests.return_value = _get_list_tests_response()
return cli
def _get_run_test_response(result_state, life_cycle_state, notebook_result):
data_json = """
{"notebook_output":
{"result": "IHaveReturned", "truncated": false},
"metadata":
{"execution_duration": 15000,
"run_type": "SUBMIT_RUN",
"cleanup_duration": 0,
"number_in_job": 1,
"cluster_instance":
{"cluster_id": "0925-141d1222-narcs242",
"spark_context_id": "803963628344534476"},
"creator_user_name": "[email protected]",
"task": {"notebook_task": {"notebook_path": "/test_mynotebook"}},
"run_id": 7, "start_time": 1569887259173,
"job_id": 4,
"state": {"result_state": "SUCCESS", "state_message": "",
"life_cycle_state": "TERMINATED"}, "setup_duration": 2000,
"run_page_url": "https://westus2.azuredatabricks.net/?o=14702dasda6094293890#job/4/run/1",
"cluster_spec": {"existing_cluster_id": "0925-141122-narcs242"}, "run_name": "myrun"}}
"""
data_dict = json.loads(data_json)
data_dict['notebook_output']['result'] = notebook_result
data_dict['metadata']['state']['result_state'] = result_state
data_dict['metadata']['state']['life_cycle_state'] = life_cycle_state
return ExecuteNotebookResult.from_job_output(data_dict)
def _get_list_tests_response():
result = {}
result['test_mynotebook'] = '/test_mynotebook'
result['test_mynotebook2'] = '/test_mynotebook2'
return result
def _get_run_tests_response(result_state, life_cycle_state, notebook_result):
data_json = """
{"notebook_output":
{"result": "IHaveReturned", "truncated": false},
"metadata":
{"execution_duration": 15000,
"run_type": "SUBMIT_RUN",
"cleanup_duration": 0,
"number_in_job": 1,
"cluster_instance":
{"cluster_id": "0925-141d1222-narcs242",
"spark_context_id": "803963628344534476"},
"creator_user_name": "[email protected]",
"task": {"notebook_task": {"notebook_path": "/test_mynotebook"}},
"run_id": 7, "start_time": 1569887259173,
"job_id": 4,
"state": {"result_state": "SUCCESS", "state_message": "",
"life_cycle_state": "TERMINATED"}, "setup_duration": 2000,
"run_page_url": "https://westus2.azuredatabricks.net/?o=14702dasda6094293890#job/4/run/1",
"cluster_spec": {"existing_cluster_id": "0925-141122-narcs242"}, "run_name": "myrun"}}
"""
data_dict = json.loads(data_json)
data_dict['notebook_output']['result'] = notebook_result
data_dict['metadata']['state']['result_state'] = result_state
data_dict['metadata']['state']['life_cycle_state'] = life_cycle_state
data_dict2 = json.loads(data_json)
data_dict2['notebook_output']['result'] = notebook_result
data_dict2['metadata']['state']['result_state'] = result_state
data_dict2['metadata']['task']['notebook_task']['notebook_path'] = '/test_mynotebook2'
data_dict2['metadata']['state']['life_cycle_state'] = life_cycle_state
results = []
results.append(ExecuteNotebookResult.from_job_output(data_dict))
results.append(ExecuteNotebookResult.from_job_output(data_dict2))
return results
| [
"[email protected]"
] | |
217b1de8d4c07839bfa55e381c6b8ea32bf45360 | d8cbc94a4207337d709a64447acb9c8fe501c75a | /correspondence_retrieval/code/feature.py | 1131c54b3b9d2cf9174b74c8a8c0d3673e09ef25 | [
"MIT"
] | permissive | sripathisridhar/acav100m | 6f672384fa723a637d94accbbe11a9a962f5f87f | 13b438b6ce46d09ba6f79aebb84ad31dfa3a8e6f | refs/heads/master | 2023-09-06T01:05:21.188822 | 2021-11-18T08:08:08 | 2021-11-18T08:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,140 | py | from functools import partial
from collections import defaultdict
from tqdm import tqdm
import torch
import torchvision
import torchvision.transforms as transforms
from model import get_model
def get_loaders(num_workers):
# Datasets and Dataloaders
mean, std = [0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010] # color normalization
Rot90 = partial(
transforms.functional.rotate,
angle=90,
)
class SingleToPairTransform:
"""Pair of (original, modified)"""
def __init__(self, funcs):
self.funcs = funcs
def run(self, x):
for func in self.funcs:
x = func(x)
return x
def __call__(self, x):
return x, self.run(x)
class PairTransform:
def __init__(self, funcs):
self.funcs = funcs
def __call__(self, xs):
res = []
for x in xs:
for func in self.funcs:
x = func(x)
res.append(x)
return res
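    # the composed pipeline below turns each image into the pair
    # (original, rotated 90 degrees) and then converts and normalizes both views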
view_transform = transforms.Compose(
[
SingleToPairTransform([Rot90]),
PairTransform([transforms.ToTensor()]),
PairTransform([transforms.Normalize(mean, std)]),
]
)
datasets = {
'train': torchvision.datasets.CIFAR10(
root='./data', train=True, download=True, transform=view_transform),
'test': torchvision.datasets.CIFAR10(
root='./data', train=False, download=True, transform=view_transform),
}
loaders = {key: torch.utils.data.DataLoader(
dataset, batch_size=128, shuffle=False, num_workers=num_workers)
for key, dataset in datasets.items()}
return loaders
def _get_feature(model, loaders, device):
views_features = defaultdict(lambda: defaultdict(list))
print("extracting features")
# view / class / index
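    # i.e. views_features[view_index][class_label] is a list of per-sample feature tensors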
with torch.no_grad():
for loader in loaders.values():
for views, labels in tqdm(loader, ncols=80):
for view_index, view in enumerate(views):
view = view.to(device)
outputs = model(view)
outputs = outputs.detach().cpu()
for i in range(len(labels)):
views_features[view_index][labels[i].item()].append(outputs[i].detach().cpu())
dataset_size = sum(len(class_features) for class_features in views_features[0].values())
nclasses = len(views_features[0])
for view_index, view_features in views_features.items():
for class_index, class_feature in view_features.items():
views_features[view_index][class_index] = torch.stack(class_feature, dim=0)
return views_features, dataset_size, nclasses
def get_feature(num_workers, device, finetune=False, sample=False):
loaders = get_loaders(num_workers)
models = get_model(device, num_workers, finetune=finetune, sample=sample)
model = models['model']
features, dataset_size, nclasses = _get_feature(model, loaders, device)
return features, dataset_size, nclasses
| [
"[email protected]"
] | |
d088f9806c9ae54531c32bef8fa1818a989c00e1 | f6a9b1a1b66f369c00e8bfeb3907f927b999e77f | /test/onnx/test_onnx_opset.py | 5c701e3d48a1a095609335eeebcffdea39b2ef71 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | cjlovering/pytorch | 62f1606be4190c910a44186a3046cd36df1be9db | 78a376592d39859b06a10ce47b77db7be5118ebb | refs/heads/master | 2020-06-01T06:56:35.753689 | 2019-06-07T03:47:49 | 2019-06-07T03:57:12 | 190,688,055 | 0 | 0 | NOASSERTION | 2019-06-07T04:31:17 | 2019-06-07T04:31:16 | null | UTF-8 | Python | false | false | 5,395 | py | from test_pytorch_common import TestCase, run_tests
import torch
import torch.onnx
from torch.nn import Module
import onnx
import io
from torch.onnx.symbolic_helper import _export_onnx_opset_version
from torch.onnx import ir_version, producer_name, producer_version
def check_onnx_opset_operator(model, ops, opset_version=_export_onnx_opset_version):
# check_onnx_components
assert model.ir_version == ir_version and \
model.producer_name == producer_name and \
model.producer_version == producer_version and \
model.opset_import[0].version == opset_version
# check the schema with the onnx checker
onnx.checker.check_model(model)
# check target type and attributes
graph = model.graph
# ops should contain an object for each node
# in graph.node, in the right order.
# At least the op_name should be specified,
# but the op's attributes can optionally be
# specified as well
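    # For example (illustrative values only, not taken from the tests below):
    # a single Relu node with no attribute checks would be described as
    #     ops = [{"op_name": "Relu"}]
    # and a node with one int attribute as
    #     ops = [{"op_name": "Flatten",
    #             "attributes": [{"name": "axis", "i": 1, "type": 2}]}]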
assert len(ops) == len(graph.node)
for i in range(0, len(ops)):
assert graph.node[i].op_type == ops[i]['op_name']
        if "attributes" in ops[i]:
attributes = ops[i]['attributes']
assert len(attributes) == len(graph.node[i].attribute)
for j in range(0, len(attributes)):
for attribute_field in attributes[j].keys():
assert attributes[j][attribute_field] == getattr(graph.node[i].attribute[j], attribute_field)
def check_onnx_opsets_operator(module, x, ops, opset_versions, training=False):
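    """Export `module` once per opset version and validate each exported ONNX graph against `ops`."""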
for opset_version in opset_versions:
f = io.BytesIO()
torch.onnx.export(module, x, f, opset_version=opset_version, training=training)
model = onnx.load(io.BytesIO(f.getvalue()))
check_onnx_opset_operator(model, ops[opset_version], opset_version)
class TestONNXOpset(TestCase):
def test_opset_fallback(self):
class MyModule(Module):
def forward(self, x):
return torch.isnan(x)
ops = [{"op_name" : "IsNaN"},
{"op_name" : "Cast", "attributes" : [{"name" : "to", "i" : 2, "type" : 2}]}]
ops = {9 : ops, 10 : ops}
x = torch.tensor([1.0, float('nan'), 2.0])
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
def test_topk(self):
class MyModule(Module):
def forward(self, x):
return torch.topk(x, 3)
ops_9 = [{"op_name" : "TopK", "attributes" : [{"name" : "axis", "i" : -1, "type" : 2},
{"name" : "k", "i" : 3, "type" : 2}]}]
ops_10 = [{"op_name" : "Constant", "attributes" : [{"name" : "value", "type" : 4}]},
{"op_name" : "Unsqueeze", "attributes" : [{"name" : "axes", "ints" : [0], "type" : 7}]},
{"op_name" : "TopK", "attributes" : [{"name" : "axis", "i" : -1, "type" : 2}]}]
ops = {9 : ops_9, 10 : ops_10}
x = torch.arange(1., 6., requires_grad=True)
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
def test_maxpool(self):
module = torch.nn.MaxPool1d(2, stride=1)
ops_9 = [{"op_name" : "MaxPool",
"attributes" :
[{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7}]}]
ops_10 = [{"op_name" : "MaxPool",
"attributes" :
[{"name": "ceil_mode", "i": 0, "type": 2},
{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7}]}]
ops = {9 : ops_9, 10 : ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(module, x, ops, opset_versions=[10])
# add test with dilations
module = torch.nn.MaxPool1d(2, stride=1, dilation=2)
ops_10 = [{"op_name" : "MaxPool",
"attributes" :
[{"name": "ceil_mode", "i": 0, "type": 2},
{"name": "dilations", "ints": [2], "type": 7},
{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7}]}]
ops = {9 : ops_9, 10 : ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(module, x, ops, opset_versions=[10])
def test_dropout(self):
class MyModule(Module):
def __init__(self):
super(MyModule, self).__init__()
self.dropout = torch.nn.Dropout(0.5)
def forward(self, x):
return self.dropout(x)
x = torch.randn(1, 2, 3)
# we should only export the onnx Dropout op in training mode; test both modes
# test training mode
ops = [{"op_name" : "Dropout", "attributes" : [{"name" : "ratio", "f" : 0.5, "type" : 1}]}]
ops = {9 : ops, 10 : ops}
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10], training=True)
# test eval mode
ops = []
ops = {9 : ops, 10 : ops}
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10], training=False)
if __name__ == '__main__':
run_tests()
| [
"[email protected]"
] | |
361b46fce3ad72fb981975dd6658176bb786c53e | 86e904c75d0140eea3e4169d216955e1c34801b3 | /python10/화면만들기/나의윈도우3.py | 49e8462a11be1cca1561d6ddab6b2a9b3f019792 | [] | no_license | reharmony/cloudpython | d62f61749e5b5862d3b81e449d5154e188a14d21 | 98e033e537d763ba86d162f58d0fe8f64249a291 | refs/heads/master | 2020-04-29T16:58:55.281917 | 2019-05-15T12:11:43 | 2019-05-15T12:11:43 | 176,281,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,109 | py | '''
Created on 2019. 4. 10.
@author: user
'''
from tkinter import *
from db.DB연결테스트 import *
id_input, pw_input, name_input, tel_input, data, record = None, None, None, None, None, None
def insert_ui():
    global id_input, pw_input, name_input, tel_input
    w = Tk() # create the window
    w.geometry("400x400") # set the window size
    w.title("나의 첫 윈도우") # set the window title
    w.resizable(width=False, height=False) # make the size fixed
    w.configure(bg="#FAF4C0")
    id_text = Label(w, text="아이디 입력", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label (parent window, text to display, attribute values)
    pw_text = Label(w, text="패스워드 입력", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label
    name_text = Label(w, text="이름 입력", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label
    tel_text = Label(w, text="전화번호 입력", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label
    insert = Button(w, text="회원 가입", font = ("굴림", 25), fg = "red", bg= "yellow", command = event_process_insert) # define a button (in tkinter, pass the function name without the trailing ())
    id_input = Entry(w, fg = "#F2CB61", font =("굴림",25), width=12) # define an entry field
    pw_input = Entry(w, fg = "#F2CB61", font =("굴림",25), width=12) # define an entry field
    name_input = Entry(w, fg = "#F2CB61", font =("굴림",25),width=12) # define an entry field
    tel_input = Entry(w, fg = "#F2CB61", font =("굴림",25),width=12) # define an entry field
    id_text.pack() # add the label to the window
    id_input.pack() # add the entry to the window
    pw_text.pack() # add the label to the window
    pw_input.pack() # add the entry to the window
    name_text.pack() # add the label to the window
    name_input.pack() # add the entry to the window
    tel_text.pack() # add the label to the window
    tel_input.pack() # add the entry to the window
    insert.pack() # add the button to the window
    w.mainloop() # keep the window running
def select_result_ui(record):
    w = Tk() # create the window
    w.geometry("400x400") # set the window size
    w.title("나의 첫 윈도우") # set the window title
    w.resizable(width=False, height=False) # make the size fixed
    w.configure(bg="#FAF4C0")
    id_text = Label(w, text="검색된 아이디", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label (parent window, text to display, attribute values)
    pw_text = Label(w, text="검색된 암호", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label
    name_text = Label(w, text="검색된 이름", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label
    tel_text = Label(w, text="검색된 전화번호", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label
    id_input = Label(w, text=record[0],bg ="white",font =("굴림",25), width=12) # define a label showing the fetched value
    pw_input = Label(w, text=record[1],bg ="white",font =("굴림",25), width=12) # define a label showing the fetched value
    name_input = Label(w, text=record[2],bg ="white",font =("굴림",25),width=12) # define a label showing the fetched value
    tel_input = Label(w, text=record[3],bg ="white",font =("굴림",25),width=12) # define a label showing the fetched value
    id_text.pack() # add the label to the window
    id_input.pack() # add the value label to the window
    pw_text.pack() # add the label to the window
    pw_input.pack() # add the value label to the window
    name_text.pack() # add the label to the window
    name_input.pack() # add the value label to the window
    tel_text.pack() # add the label to the window
    tel_input.pack() # add the value label to the window
    w.mainloop() # keep the window running
def select_ui():
    global id_input, pw_input, name_input, tel_input, data
    w = Tk() # create the window
    w.geometry("400x150") # set the window size
    w.title("나의 첫 윈도우") # set the window title
    w.resizable(width=False, height=False) # make the size fixed
    w.configure(bg="#FAF4C0")
    id_text = Label(w, text="검색할 ID를 입력하세요.", font=("굴림", 20), bg="#FAF4C0") # define a label (parent window, text to display, attribute values)
    insert = Button(w, text="검색", font = ("굴림", 15), command = event_process_select) # define a button
    data = Entry(w, font =("굴림",25), width=12) # define an entry field
    id_text.pack() # add the label to the window
    data.pack() # add the entry to the window
    insert.pack() # add the button to the window
    w.mainloop() # keep the window running
def update_ui():
    global id_input, pw_input, name_input, tel_input
    w = Tk() # create the window
    w.geometry("400x400") # set the window size
    w.title("나의 첫 윈도우") # set the window title
    w.resizable(width=False, height=False) # make the size fixed
    w.configure(bg="#FAF4C0")
    id_text = Label(w, text="아이디 입력", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label (parent window, text to display, attribute values)
    pw_text = Label(w, text="패스워드 입력", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label
    name_text = Label(w, text="이름 입력", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label
    tel_text = Label(w, text="전화번호 입력", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label
    insert = Button(w, text="회원 정보 수정", font = ("굴림", 25), fg = "red", bg= "yellow", command = event_process_update) # define a button
    id_input = Entry(w, fg = "#F2CB61", font =("굴림",25), width=12) # define an entry field
    pw_input = Entry(w, fg = "#F2CB61", font =("굴림",25), width=12) # define an entry field
    name_input = Entry(w, fg = "#F2CB61", font =("굴림",25),width=12) # define an entry field
    tel_input = Entry(w, fg = "#F2CB61", font =("굴림",25),width=12) # define an entry field
    id_text.pack() # add the label to the window
    id_input.pack() # add the entry to the window
    pw_text.pack() # add the label to the window
    pw_input.pack() # add the entry to the window
    name_text.pack() # add the label to the window
    name_input.pack() # add the entry to the window
    tel_text.pack() # add the label to the window
    tel_input.pack() # add the entry to the window
    insert.pack() # add the button to the window
    w.mainloop() # keep the window running
def delete_ui():
    global id_input, pw_input, name_input, tel_input
    w = Tk() # create the window
    w.geometry("400x150") # set the window size
    w.title("나의 첫 윈도우") # set the window title
    w.resizable(width=False, height=False) # make the size fixed
    w.configure(bg="#FAF4C0")
    id_text = Label(w, text="아이디 입력", font=("굴림", 25), fg="blue", bg="#FAF4C0") # define a label (parent window, text to display, attribute values)
    insert = Button(w, text="회원 정보 삭제", font = ("굴림", 25), fg = "red", bg= "yellow", command = event_process_delete) # define a button
    id_input = Entry(w, fg = "#F2CB61", font =("굴림",25), width=12) # define an entry field
    id_text.pack() # add the label to the window
    id_input.pack() # add the entry to the window
    insert.pack() # add the button to the window
    w.mainloop() # keep the window running
def selectall_ui():
    w = Tk() # create the window
    w.geometry("400x600") # set the window size
    w.title("나의 첫 윈도우") # set the window title
    w.resizable(width=False, height=False) # make the size fixed
    w.configure(bg="#FAF4C0")
    insert = Button(w, text="회원 전체 목록 보기", font = ("굴림", 25), fg = "red", bg= "yellow", command = event_process_selectall) # define a button
    insert.pack() # add the button to the window
    allList = db_process_selectall()
    listtext = Label(w, text=allList, font=("굴림", 25), fg="blue", bg="#FAF4C0")
    listtext.pack()
    w.mainloop() # keep the window running
def event_process_insert():
    print("이벤트가 처리 되었음...")
    id = id_input.get() # get() returns the value as a string
    pw = pw_input.get() # get() returns the value as a string
    name = name_input.get() # get() returns the value as a string
    tel = tel_input.get() # get() returns the value as a string
    db_process_insert(id,pw,name,tel)
def event_process_select():
    global data, record
    print("정보검색 시작...")
    id = data.get() # get() returns the value as a string
    record = db_process_select(id)
    select_result_ui(record)
def event_process_update():
    print("이벤트가 처리 되었음...")
    id = id_input.get() # get() returns the value as a string
    pw = pw_input.get() # get() returns the value as a string
    name = name_input.get() # get() returns the value as a string
    tel = tel_input.get() # get() returns the value as a string
    db_process_update(id,pw,name,tel)
def event_process_delete():
    print("이벤트가 처리 되었음...")
    id = id_input.get() # get() returns the value as a string
    db_process_delete(id)
def event_process_selectall():
    print("이벤트가 처리 되었음...")
    db_process_selectall()
if __name__ == '__main__':
    w = Tk() # create the window
    w.geometry("400x350") # set the window size
    w.title("메인화면") # set the window title
    w.resizable(width=False, height=False) # make the size fixed
    w.configure(bg="#FAF4C0")
    insert = Button(w, text="회원 가입 하기", font = ("굴림", 25), fg = "red", bg= "yellow", command = insert_ui) # define a button
    insert.pack() # add the button to the window
    insert = Button(w, text="회원 검색 하기", font = ("굴림", 25), fg = "red", bg= "yellow", command = select_ui) # define a button
    insert.pack() # add the button to the window
    insert = Button(w, text="회원 수정 하기", font = ("굴림", 25), fg = "red", bg= "yellow", command = update_ui) # define a button
    insert.pack() # add the button to the window
    insert = Button(w, text="회원 탈퇴 하기", font = ("굴림", 25), fg = "red", bg= "yellow", command = delete_ui) # define a button
    insert.pack() # add the button to the window
    insert = Button(w, text="회원 전체 보기", font = ("굴림", 25), fg = "red", bg= "yellow", command = selectall_ui) # define a button
    insert.pack() # add the button to the window
    w.mainloop() # keep the window running
| [
"[email protected]"
] | |
f88444e475a9343e565e7690680f1c21bae132aa | 5a4436884af5341ce855c0e84866b972a0f61c05 | /day5/recursion/6.py | 04c04f75d05c5974bac22c2c6d636a59e4b74582 | [] | no_license | sreejithev/pythoncodes | 74a420c4f025b893e27f17ba85632a4a096f17fd | 70df14871a9687916d1c4ada76c055607f13e8ce | refs/heads/master | 2021-01-21T20:59:47.056167 | 2017-06-19T09:43:17 | 2017-06-19T09:43:17 | 92,292,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | # handling tree-like structure
my_expression = 12
def eval(expr):
if isinstance(expr, int):
return expr
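    # recursive case: a minimal sketch assuming tree nodes are tuples such as
    # ("+", left, right); that tuple shape is an assumption, for illustration only
    op, left, right = expr
    if op == "+":
        return eval(left) + eval(right)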
print(eval(my_expression))
| [
"[email protected]"
] | |
799893b61cb92573ebd28b6e1e155e97ada16636 | 2e6f37e664d2cc85d0c704f20de05b2cae86771d | /options/options.py | 102d63f4a1736cad3d0d940bb8cd7134dcaf4736 | [
"MIT"
] | permissive | LEOGML/cv_template | 5bee5e43efb490649f63a7c4e1b77e62a3e1d948 | c1a87465f0aeb79dab63b0cae88861a6282c045c | refs/heads/master | 2023-01-30T21:32:38.240103 | 2020-12-15T09:39:14 | 2020-12-15T09:39:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,594 | py | import argparse
import sys
import torch
import misc_utils as utils
"""
Arg parse
opt = parse_args()
"""
def parse_args():
# experiment specifics
parser = argparse.ArgumentParser()
parser.add_argument('--tag', type=str, default='cache',
help='folder name to save the outputs')
parser.add_argument('--gpu_ids', '--gpu', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
# dirs (NOT often Changed)
parser.add_argument('--data_root', type=str, default='./datasets/')
parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--log_dir', type=str, default='./logs', help='logs are saved here')
parser.add_argument('--result_dir', type=str, default='./results', help='results are saved here')
#######################
parser.add_argument('--model', type=str, default=None, help='which model to use')
parser.add_argument('--norm', type=str, choices=['batch', 'instance', None], default=None,
help='[instance] normalization or [batch] normalization')
# batch size
parser.add_argument('--batch_size', '-b', type=int, default=1, help='input batch size')
parser.add_argument('--workers', '-w', type=int, default=4, help='dataloader workers')
# optimizer and scheduler
parser.add_argument('--optimizer', choices=['adam', 'sgd', 'radam', 'lookahead', 'ranger'], default='adam')
parser.add_argument('--scheduler', choices=['cos', 'step', 'exp', 'cyclic', 'lambda', 'None'], default='cos')
# data augmentation
parser.add_argument('--aug', action='store_true', help='Randomly scale, jitter, change hue, saturation and brightness')
parser.add_argument('--norm-input', action='store_true')
    parser.add_argument('--random-erase', action='store_true', help='random erasing augmentation')
# scale
parser.add_argument('--scale', type=int, default=None, help='scale images to this size')
parser.add_argument('--crop', type=int, default=256, help='then crop to this size')
# for datasets
parser.add_argument('--dataset', default='', help='training dataset')
parser.add_argument('--transform', default='crop256', help='transform')
parser.add_argument('--val_set', type=str, default=None)
parser.add_argument('--test_set', type=str, default=None)
# init weights
parser.add_argument('--init', type=str, default=None, help='{normal, xavier, kaiming, orthogonal}')
# loss weight
parser.add_argument('--weight_ssim', type=float, default=1.1) # SSIM loss
parser.add_argument('--weight_l1', type=float, default=0.75) # l1 loss
parser.add_argument('--weight_vgg', type=float, default=0.) # content loss(vgg loss)
parser.add_argument('--weight_grad', type=float, default=0.) # gradient loss
# training options
parser.add_argument('--debug', action='store_true', help='debug mode')
parser.add_argument('--load', type=str, default=None, help='load checkpoint')
parser.add_argument('--resume', action='store_true', help='resume training, only used when --load')
parser.add_argument('--reset', action='store_true', help='reset training, only used when --load')
parser.add_argument('--epochs', '--max_epoch', type=int, default=50, help='epochs to train')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--save_freq', type=int, default=10, help='freq to save models')
parser.add_argument('--eval_freq', '--val_freq', type=int, default=10, help='freq to eval models')
parser.add_argument('--log_freq', type=int, default=1, help='freq to vis in tensorboard')
# test options
parser.add_argument('--tta', action='store_true', help='test with augmentation')
parser.add_argument('--tta-x8', action='store_true', help='test with augmentation x8')
return parser.parse_args()
opt = parse_args()
opt.device = 'cuda:' + opt.gpu_ids if torch.cuda.is_available() and opt.gpu_ids != '-1' else 'cpu'
if opt.debug:
opt.save_freq = 1
opt.eval_freq = 1
opt.log_freq = 1
def get_command_run():
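    """Reconstruct the shell command that launched this run, for logging to run_log.txt."""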
args = sys.argv.copy()
args[0] = args[0].split('/')[-1]
if sys.version[0] == '3':
command = 'python3'
else:
command = 'python'
for i in args:
command += ' ' + i
return command
if opt.tag != 'cache':
with open('run_log.txt', 'a') as f:
f.writelines(utils.get_time_str(fmt="%Y-%m-%d %H:%M:%S") + ' ' + get_command_run() + '\n')
# utils.print_args(opt)
| [
"[email protected]"
] | |
03d5235b29d6899ba1a9e5331c8d10a36d39d072 | 34087e6a9bb41d9240de4c1bf91cb14a044126bc | /scripts/phono4py | 5f744ab2966b77122dd03edd3d290e23d2b6b800 | [] | no_license | materialsvirtuallab/phonopy | 62117e757f98447de2b247e4b6aa186b0b141aab | 97888bac864f8d8e5eee799b2eeef232e627f018 | refs/heads/master | 2020-12-01T03:09:31.707376 | 2014-09-08T15:42:54 | 2014-09-08T15:42:54 | 21,427,440 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,084 | #!/usr/bin/env python
# Copyright (C) 2013 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import numpy as np
from optparse import OptionParser
from phonopy.interface.vasp import read_vasp
from phonopy.structure.cells import get_supercell, get_primitive, print_cell
from phonopy.structure.symmetry import Symmetry
from phonopy.harmonic.force_constants import set_translational_invariance, set_permutation_symmetry, get_force_constants
from phonopy.harmonic.dynamical_matrix import DynamicalMatrix
from phonopy.units import VaspToTHz
from anharmonic.phonon3.fc3 import set_permutation_symmetry_fc3, set_translational_invariance_fc3, show_drift_fc3, get_fc3
from anharmonic.file_IO import \
parse_disp_fc3_yaml, parse_disp_fc4_yaml,\
parse_DELTA_FORCES, parse_DELTA_FORCES_FOURTH,\
write_DELTA_FC2_SETS, parse_DELTA_FC2_SETS, \
write_DELTA_FC2_FOURTH_SETS, parse_DELTA_FC2_FOURTH_SETS,\
write_FORCES_FOURTH, parse_FORCES_SECOND, \
read_fc4_from_hdf5, read_fc3_from_hdf5, read_fc2_from_hdf5, \
write_fc4_to_hdf5, write_fc3_to_hdf5, write_fc2_to_hdf5, \
write_supercells_with_three_displacements, \
write_freq_shifts_to_hdf5
from anharmonic.phonon3.displacement_fc3 import get_third_order_displacements
from anharmonic.phonon4.displacement_fc4 import get_fourth_order_displacements
from anharmonic.settings import Phono3pyConfParser
from anharmonic.phonon4.fc4 import get_fc4, show_drift_fc4, set_translational_invariance_fc4, set_permutation_symmetry_fc4
from anharmonic.phonon4 import Phono4py
# AA is created at http://www.network-science.de/ascii/.
def print_phono4py():
print """ _ _ _
_ __ | |__ ___ _ __ ___ | || | _ __ _ _
| '_ \| '_ \ / _ \| '_ \ / _ \| || |_| '_ \| | | |
| |_) | | | | (_) | | | | (_) |__ _| |_) | |_| |
| .__/|_| |_|\___/|_| |_|\___/ |_| | .__/ \__, |
|_| |_| |___/
"""
def print_end():
print """ _
___ _ __ __| |
/ _ \ '_ \ / _` |
| __/ | | | (_| |
\___|_| |_|\__,_|
"""
def print_error(message):
print message
# Parse options
parser = OptionParser()
parser.set_defaults(amplitude=None,
band_indices=None,
cell_poscar=None,
factor=None,
fc2_fourth_sets_mode=False,
forces_fourth_mode=False,
grid_points=None,
is_nodiag=False,
is_displacement=False,
is_nosym=False,
is_plusminus_displacements=False,
is_translational_symmetry=False,
is_symmetrize_fc2=False,
is_symmetrize_fc3_r=False,
is_symmetrize_fc4_r=False,
log_level=None,
mesh_numbers=None,
primitive_axis=None,
read_fc2_fourth=False,
read_fc2=False,
read_fc3=False,
read_fc4=False,
output_filename=None,
supercell_dimension=None,
symprec=1e-5,
temperatures=None,
verbose=True)
parser.add_option("--amplitude", dest="amplitude", type="float",
help="Distance of displacements")
parser.add_option("--bi", "--band_indices", dest="band_indices",
type="string",
help="Band indices where life time is calculated")
parser.add_option("-c", "--cell", dest="cell_poscar",
action="store", type="string",
help="Read unit cell", metavar="FILE")
parser.add_option("--create_fc2_fourth",
dest="fc2_fourth_sets_mode",
action="store_true",
help="Create DELTA_FC2_FOURTH_SETS, DELTA_FC2_SETS, and fc2.hdf")
parser.add_option("--cf4", "--create_f4",
dest="forces_fourth_mode",
action="store_true",
help="Create FORCES_FOURTH")
parser.add_option("-d", "--disp", dest="is_displacement",
action="store_true",
help="As first stage, get least displacements")
parser.add_option("--dim",
dest="supercell_dimension",
type="string",
help="Supercell dimension")
parser.add_option("--factor", dest="factor", type="float",
help="Conversion factor to favorite frequency unit")
parser.add_option("--fc2",
dest="read_fc2",
action="store_true",
help="Read second order force constants")
parser.add_option("--fc2_fourth",
dest="read_fc2_fourth",
action="store_true",
help="Read DELTA_FC2_FOURTH_SETS, DELTA_FC2_SETS, and fc2.hdf")
parser.add_option("--fc3",
dest="read_fc3",
action="store_true",
help="Read third order force constants")
parser.add_option("--fc4",
dest="read_fc4",
action="store_true",
help="Read fourth order force constants")
parser.add_option("--gp", "--grid_points",
dest="grid_points",
type="string",
help="Fixed grid points where damping functions are calculated ")
parser.add_option("--mesh",
dest="mesh_numbers",
type="string",
help="Mesh numbers")
parser.add_option("--nodiag", dest="is_nodiag",
action="store_true",
help="Set displacements parallel to axes")
parser.add_option("--nosym", dest="is_nosym",
action="store_true",
help="No symmetrization of triplets")
parser.add_option("-o", dest="output_filename",
type="string",
help="Filename of output of damping function")
parser.add_option("--pa", "--primitive_axis", dest="primitive_axis",
action="store", type="string",
help="Same as PRIMITIVE_AXIS tags")
parser.add_option("--pm", dest="is_plusminus_displacements",
action="store_true",
help="Set plus minus displacements")
parser.add_option("--sym_fc2", dest="is_symmetrize_fc2",
action="store_true",
help="Symmetrize fc2 by index exchange")
parser.add_option("--sym_fc3r", dest="is_symmetrize_fc3_r",
action="store_true",
help="Symmetrize fc3 in real space by index exchange")
parser.add_option("--sym_fc4r", dest="is_symmetrize_fc4_r",
action="store_true",
help="Symmetrize fc4 in real space by index exchange")
parser.add_option("--ts", dest="temperatures",
type="string", help="Temperatures for damping functions")
parser.add_option("--tsym", dest="is_translational_symmetry",
action="store_true",
help="Impose translational invariance condition")
parser.add_option("--tolerance", dest="symprec", type="float",
help="Symmetry tolerance to search")
parser.add_option("-v", "--verbose", dest="verbose",
action="store_true",
help="Detailed run-time information is displayed")
parser.add_option("--loglevel", dest="log_level", type="int",
help="Log level")
(options, args) = parser.parse_args()
option_list = parser.option_list
# Log level
if options.log_level is None:
log_level = options.verbose
else:
log_level = options.log_level
# Create FC2_FOURTH_SETS
if options.fc2_fourth_sets_mode:
displacements = parse_disp_fc3_yaml()
write_DELTA_FC2_FOURTH_SETS(args, displacements)
print_end()
exit(0)
# Create FC2_FOURTH_SETS
if options.forces_fourth_mode:
displacements = parse_disp_fc4_yaml()
write_FORCES_FOURTH(args, displacements)
print_end()
exit(0)
# Title
if log_level:
print_phono4py()
if len(args) > 0:
phono3py_conf = Phono3pyConfParser(filename=args[0],
options=options,
option_list=option_list)
settings = phono3py_conf.get_settings()
else:
phono3py_conf = Phono3pyConfParser(options=options,
option_list=option_list)
settings = phono3py_conf.get_settings()
# Read POSCAR
if options.cell_poscar is None:
if os.path.exists('POSCAR'):
unitcell_filename = 'POSCAR'
else:
print_error("POSCAR could not be found.")
if log_level:
print_end()
sys.exit(1)
else:
if os.path.exists(options.cell_poscar):
unitcell_filename = options.cell_poscar
else:
print_error("%s not found" % options.cell_poscar)
if log_level:
print_end()
sys.exit(1)
unitcell = read_vasp(unitcell_filename,
settings.get_chemical_symbols())
# Supercell and Symmetry
supercell = get_supercell(unitcell, settings.get_supercell_matrix())
symmetry = Symmetry(supercell, options.symprec)
# Log
if log_level:
print "Spacegroup: ", symmetry.get_international_table()
###############################################################
# Create supercells with displacements and exit (pre-process) #
###############################################################
if options.is_displacement:
dds = get_fourth_order_displacements(
supercell,
symmetry,
is_plusminus=settings.get_is_plusminus_displacement(),
is_diagonal=settings.get_is_diagonal_displacement())
write_supercells_with_three_displacements(supercell,
dds,
options.amplitude)
##########################################
# Calculate fourth-order force constants #
##########################################
else:
primitive = get_primitive(
supercell,
np.dot(np.linalg.inv(settings.get_supercell_matrix()),
settings.get_primitive_matrix()),
options.symprec)
if log_level:
print "------------------------ primitive cell for fc ---------------------------"
print_cell(primitive)
print "-------------------------- supercell for fc ------------------------------"
print_cell(supercell, mapping=primitive.get_supercell_to_primitive_map())
print "----------------- ratio (supercell for fc)/(primitive) -------------------"
for vec in np.dot(supercell.get_cell(), np.linalg.inv(primitive.get_cell())):
print "%5.2f"*3 % tuple(vec)
# fc2
if options.read_fc2:
if log_level:
print "----- Read fc2 -----"
sys.stdout.flush()
if os.path.exists('fc2.hdf5'):
fc2 = read_fc2_from_hdf5()
else:
print "fc2.hdf5 not found"
if log_level:
print_end()
sys.exit(0)
else:
if log_level:
print "----- Solve fc2 -----"
sys.stdout.flush()
disp_dataset = parse_disp_fc4_yaml()
forces_second = parse_FORCES_SECOND(disp_dataset)
fc2 = get_force_constants(forces_second,
symmetry,
supercell)
if options.is_symmetrize_fc2:
set_permutation_symmetry(fc2)
if options.is_translational_symmetry:
set_translational_invariance(fc2)
if not options.read_fc2:
if log_level:
print "----- Write fc2.hdf5 -----"
write_fc2_to_hdf5(fc2)
# fc3
if options.read_fc3: # Read fc3.hdf5
if log_level:
print "----- Read fc3 -----"
sys.stdout.flush()
fc3 = read_fc3_from_hdf5()
if options.is_translational_symmetry:
set_translational_invariance_fc3(fc3)
else:
if log_level:
print "----- Solve fc3 -----"
sys.stdout.flush()
if options.read_fc2_fourth: # fc3 from DELTA_FC2_SETS
displacements = parse_disp_fc3_yaml()
parse_DELTA_FC2_SETS(displacements)
else: # fc3 from DELTA_FORCES
displacements = parse_disp_fc3_yaml()
parse_DELTA_FORCES(displacements)
fc3 = get_fc3(
supercell,
displacements,
fc2,
symmetry,
is_translational_symmetry=options.is_translational_symmetry,
is_permutation_symmetry=options.is_symmetrize_fc3_r,
verbose=log_level)
if options.is_symmetrize_fc3_r:
if log_level:
print "----- Symmetrize fc3 by index exchange in real space -----"
set_permutation_symmetry_fc3(fc3)
show_drift_fc3(fc3)
if not options.read_fc3:
if log_level:
print "----- Write fc3.hdf5 -----"
write_fc3_to_hdf5(fc3)
# fc4
if options.read_fc4: # Read fc4.hdf
if log_level:
print "----- Read fc4 -----"
sys.stdout.flush()
fc4 = read_fc4_from_hdf5()
if options.is_translational_symmetry:
if log_level:
print "----- Impose translational invariance "
print "condition to fc4 -----"
set_translational_invariance_fc4(fc4)
else:
if options.read_fc2_fourth:
displacements = parse_disp_fc4_yaml()
parse_DELTA_FC2_FOURTH_SETS(displacements)
else: # fc4 from FORCES_FOURTH, FORCES_THIRD and FORCES_SECOND
displacements = parse_disp_fc4_yaml()
parse_DELTA_FORCES_FOURTH(displacements)
fc4 = get_fc4(
supercell,
displacements,
fc3,
symmetry,
is_translational_symmetry=options.is_translational_symmetry,
is_permutation_symmetry=options.is_symmetrize_fc4_r,
verbose=log_level)
if options.is_symmetrize_fc4_r:
if log_level:
print "----- Symmetrize fc4 by index exchange in real space -----"
set_permutation_symmetry_fc4(fc4)
if log_level:
print "(Calculating fc4 drift...)"
show_drift_fc4(fc4)
if not options.read_fc4:
if log_level:
print "----- Write fc4.hdf5 -----"
write_fc4_to_hdf5(fc4)
if options.factor is None:
factor = VaspToTHz
else:
factor = options.factor
mesh = settings.get_mesh_numbers()
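    # with a sampling mesh given, compute fourth-order frequency shifts for the
    # requested grid points and temperatures and store them in an hdf5 file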
if mesh is not None:
phono4py = Phono4py(fc4,
supercell,
primitive,
mesh,
band_indices=settings.get_band_indices(),
frequency_factor_to_THz=factor,
cutoff_frequency=1e-2,
is_nosym=options.is_nosym,
symprec=options.symprec,
log_level=log_level)
phono4py.set_frequency_shift(temperatures=settings.get_temperatures())
phono4py.set_dynamical_matrix(fc2,
supercell,
primitive)
phono4py.run_frequency_shift(settings.get_grid_points())
freq_shifts = phono4py.get_frequency_shift()
write_freq_shifts_to_hdf5(freq_shifts)
if log_level:
print_end()
| [
"[email protected]"
] | ||
d7ef09e9673d8e4d70b066dfb8bcaaa93f049652 | 225a2183a702b4808436526464f73692899a7242 | /presentation/request/baseclient.py | c62184c0d7ccbe995de74a90dcc75e82389519ab | [] | no_license | bateternal/p2p | 9b890fd25dc2d021cec7fce9786f2c88753f1a27 | fa4c804e64e818f70d849fb6dfd94c176d008208 | refs/heads/master | 2023-09-04T03:33:53.741518 | 2021-10-26T19:51:51 | 2021-10-26T19:51:51 | 284,138,608 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py |
class BaseClient:
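    """Hold a host and a numeric port for subclasses that open client connections.

    Minimal usage sketch (the values here are illustrative):
        client = BaseClient("127.0.0.1", "8080")  # the port string is coerced to int
    """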
    def __init__(self, host, port):
self.host = host
self.port = int(port) | [
"[email protected]"
] | |
933f4b58c7854b784701dd2df483ef4272ae3cfc | 77683abaded7f9f4f538c6b02635fcf342d26886 | /settings/base.py | a6c5df5d9c28a0a57e9c45082891ffd76cf32125 | [] | no_license | tchappui/flask-model | 6a3a79ef04f0e6e437b7e97be789690b69d14bd4 | 5238534b4ba6450e92cf9ff413eb0edc282305e5 | refs/heads/main | 2023-06-03T20:02:20.419972 | 2021-06-11T09:32:51 | 2021-06-11T09:32:51 | 375,974,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | import os
import pathlib
BASE_DIR = pathlib.Path(__file__).resolve().parent
INSTALLED_BLUEPRINTS = [
{"module": "flaskt.home", "name": "home", "prefix": ""}
]
SECRET_KEY = os.environ.get("SECRET_KEY", "very secret and unguessable value")
SQLALCHEMY_TRACK_MODIFICATIONS = False
| [
"[email protected]"
] | |
1f0bedae459bf6d8f4e19b7d5042a0d02edeffc7 | 25040bd4e02ff9e4fbafffee0c6df158a62f0d31 | /www/htdocs/wt/lapnw/data/item_20_6.tmpl.py | 6315b38d936cbe899af9e04cbed51763e6399ddc | [] | no_license | erochest/atlas | 107a14e715a058d7add1b45922b0f8d03bd2afef | ea66b80c449e5b1141e5eddc4a5995d27c2a94ee | refs/heads/master | 2021-05-16T00:45:47.585627 | 2017-10-09T10:12:03 | 2017-10-09T10:12:03 | 104,338,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
from lap.web.templates import GlobalTemplate, SubtemplateCode
class main(GlobalTemplate):
title = 'Page.Item: 20.6'
project = 'lapnw'
class page(SubtemplateCode):
pass
| [
"eric@eric-desktop"
] | eric@eric-desktop |
66edb24833ca5e1a04c44358218a3c5742502084 | 3637fe729395dac153f7abc3024dcc69e17f4e81 | /reference/ucmdb/discovery/vcloud_director_by_vcloud_api.py | 8e70ff594f25df698f6a246a425441fca52ea0a9 | [] | no_license | madmonkyang/cda-record | daced6846c2456f20dddce7f9720602d1583a02a | c431e809e8d0f82e1bca7e3429dd0245560b5680 | refs/heads/master | 2023-06-15T08:16:46.230569 | 2021-07-15T16:27:36 | 2021-07-15T16:27:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | #coding=utf-8
import logger
import vcloud_discover
from appilog.common.system.types.vectors import ObjectStateHolderVector
import vcloud_report
TRIGGER_IPS = "ip_addresses"
TRIGGER_VCD_ID = "vCloudDirectorId"
PARAM_REPORT_POWERED_OFF_VMS = "reportPoweredOffVms"
def DiscoveryMain(Framework):
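    # Entry point called by the UCMDB probe: try a vCloud API connection for each
    # triggered IP and report the discovered topology through the configured handler.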
OSHVResult = ObjectStateHolderVector()
ips = Framework.getTriggerCIDataAsList(TRIGGER_IPS)
vcloudDirectorId = Framework.getDestinationAttribute(TRIGGER_VCD_ID)
reportPoweredOffVms = 0
reportPoweredOffVmsValue = Framework.getParameter(PARAM_REPORT_POWERED_OFF_VMS)
if reportPoweredOffVmsValue and reportPoweredOffVmsValue.lower() == 'true':
reportPoweredOffVms = 1
if ips:
#configure how connections should be discovered/established
connectionDiscoverer = vcloud_discover.ConnectionDiscoverer(Framework)
urlGenerator = vcloud_discover.UrlByIpGenerator()
connectionDiscoverer.setUrlGenerator(urlGenerator)
connectionDiscoverer.setIps(ips)
#configure how established/failed connection should be used
connectionHandler = vcloud_discover.BaseDiscoveryConnectionHandler(Framework)
topologyDiscoverer = vcloud_discover.createVcloudDiscoverer(Framework)
topologyReporter = vcloud_report.createVcloudReporter(Framework, vcloudDirectorId, reportPoweredOffVms)
connectionHandler.setDiscoverer(topologyDiscoverer)
connectionHandler.setReporter(topologyReporter)
connectionDiscoverer.setConnectionHandler(connectionHandler)
connectionDiscoverer.initConnectionConfigurations()
connectionDiscoverer.discover(firstSuccessful=0)
if not connectionHandler.connected:
for errorMsg in connectionHandler.connectionErrors:
Framework.reportError(errorMsg)
for warningMsg in connectionHandler.connectionWarnings:
Framework.reportWarning(warningMsg)
else:
logger.warn("Job triggered on destination without any IP")
return OSHVResult | [
"[email protected]"
] | |
da919e0179455f6b53d11e7136b6f369f5bed978 | ebe7c57183b0eeba9af1bdc72f0f81b9b8129ca9 | /1. backtracking/047.py | d0866e064c3641441032f85b63db44d36015a6ae | [] | no_license | proTao/leetcode | f2e46392b56b69606e1dd25cf5738cb0ad275645 | 97533d53c8892b6519e99f344489fa4fd4c9ab93 | refs/heads/master | 2021-11-24T10:23:56.927122 | 2021-11-18T04:28:05 | 2021-11-18T04:28:05 | 110,225,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | class Solution:
def permuteUnique(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
if len(nums) == 0:
return []
distinct_nums = set(nums)
rest = {}
for i in nums:
if i in rest:
rest[i] += 1
else:
rest[i] = 1
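        # backtrack over the value counts in `rest`: each distinct value may be
        # used at most rest[value] times, which avoids duplicate permutations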
def deeper(path):
            if len(path) == len(nums):
                res.append(path)
                return
for i in distinct_nums:
if rest[i] > 0:
rest[i] -= 1
deeper(path + [i])
rest[i] += 1
deeper([])
return res
s = Solution()
res = s.permuteUnique([1,1,2])
print(res) | [
"[email protected]"
] | |
4dbf967617143afff7502d5004d540220460004e | fa58068fa4e0fd4c1c3713d27752c9c36efe8a44 | /paket/main.py | c8843f50bd8aa984c6277fdbd565fab3476ac472 | [] | no_license | Zadrayca/Kourse | f611f4d3627286eeb5f1eedb7ce9bc5d06454cdc | e9a1bf6b658573eab556d9d1fa1297d452a6dfff | refs/heads/master | 2020-06-14T09:41:41.631712 | 2017-03-09T19:00:41 | 2017-03-09T19:00:41 | 75,203,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from calk import pow, multi, summa
from calk.pow import pow as r
y = multi.multi(3, 5)
x = summa.summa(5, 7)
z = r(3, 5)
print(x, y, z)
print(dir(pow))
| [
"[email protected]"
] | |
ea2f608d540ed8ad36f731c18dc431613e5ab8fb | 2db5bf5832ddb99e93bb949ace1fad1fde847319 | /beginLearn/googleclass/class3/test.py | 7ffd1cdf702097daef8aa3a03597ae2ab5d4ac2c | [] | no_license | RoderickAdriance/PythonDemo | 2d92b9aa66fcd77b6f797e865df77fbc8c2bcd14 | 98b124fecd3a972d7bc46661c6a7de8787b8e761 | refs/heads/master | 2020-04-06T17:36:46.000133 | 2018-11-15T07:07:03 | 2018-11-15T07:07:03 | 157,666,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | from __future__ import print_function
import math
from sklearn import metrics
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("data.csv")
# shuffle the rows so the data order is randomized
california_housing_dataframe = california_housing_dataframe.reindex(
np.random.permutation(california_housing_dataframe.index))
california_housing_dataframe["median_house_value"] /= 1000.0
def my_input_fn(features, targets, batchsize=1, shuffle=True, num_epochs=None):
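    """Build a tf.data pipeline: pandas features/targets in, batched (features, label) tensors out."""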
features = {key: np.array(value) for key, value in dict(features).items()}
ds = Dataset.from_tensor_slices((features, targets))
ds = ds.batch(batchsize).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(10000)
features, label = ds.make_one_shot_iterator().get_next()
return features, label
features,label = my_input_fn(california_housing_dataframe[["total_rooms"]], california_housing_dataframe[["median_house_value"]],
batchsize=1)
| [
"[email protected]"
] | |
a87804e82e918b9342cafbd8410b9e04151ab2e4 | aee5f372ba1b5fbb1c8acf6080c4c86ae195c83f | /java-stubs/security/acl/__init__.pyi | e7af442f7ef702bb8b5218efb30dde7f6f0f22a0 | [] | no_license | rdemaria/pjlsa | 25221ae4a4b6a4abed737a41a4cafe7376e8829f | e64589ab2203338db4253fbc05ff5131142dfd5f | refs/heads/master | 2022-09-03T13:18:05.290012 | 2022-08-16T13:45:57 | 2022-08-16T13:45:57 | 51,926,309 | 1 | 5 | null | 2019-07-11T11:50:44 | 2016-02-17T13:56:40 | Python | UTF-8 | Python | false | false | 2,887 | pyi | import java.lang
import java.security
import java.util
import typing
class AclEntry(java.lang.Cloneable):
def addPermission(self, permission: 'Permission') -> bool: ...
def checkPermission(self, permission: 'Permission') -> bool: ...
def clone(self) -> typing.Any: ...
def getPrincipal(self) -> java.security.Principal: ...
def isNegative(self) -> bool: ...
def permissions(self) -> java.util.Enumeration['Permission']: ...
def removePermission(self, permission: 'Permission') -> bool: ...
def setNegativePermissions(self) -> None: ...
def setPrincipal(self, principal: java.security.Principal) -> bool: ...
def toString(self) -> str: ...
class AclNotFoundException(java.lang.Exception):
def __init__(self): ...
class Group(java.security.Principal):
def addMember(self, principal: java.security.Principal) -> bool: ...
def equals(self, object: typing.Any) -> bool: ...
def hashCode(self) -> int: ...
def isMember(self, principal: java.security.Principal) -> bool: ...
def members(self) -> java.util.Enumeration[java.security.Principal]: ...
def removeMember(self, principal: java.security.Principal) -> bool: ...
def toString(self) -> str: ...
class LastOwnerException(java.lang.Exception):
def __init__(self): ...
class NotOwnerException(java.lang.Exception):
def __init__(self): ...
class Owner:
def addOwner(self, principal: java.security.Principal, principal2: java.security.Principal) -> bool: ...
def deleteOwner(self, principal: java.security.Principal, principal2: java.security.Principal) -> bool: ...
def isOwner(self, principal: java.security.Principal) -> bool: ...
class Permission:
def equals(self, object: typing.Any) -> bool: ...
def toString(self) -> str: ...
class Acl(Owner):
def addEntry(self, principal: java.security.Principal, aclEntry: AclEntry) -> bool: ...
def checkPermission(self, principal: java.security.Principal, permission: Permission) -> bool: ...
def entries(self) -> java.util.Enumeration[AclEntry]: ...
def getName(self) -> str: ...
def getPermissions(self, principal: java.security.Principal) -> java.util.Enumeration[Permission]: ...
def removeEntry(self, principal: java.security.Principal, aclEntry: AclEntry) -> bool: ...
def setName(self, principal: java.security.Principal, string: str) -> None: ...
def toString(self) -> str: ...
class __module_protocol__(typing.Protocol):
# A module protocol which reflects the result of ``jp.JPackage("java.security.acl")``.
Acl: typing.Type[Acl]
AclEntry: typing.Type[AclEntry]
AclNotFoundException: typing.Type[AclNotFoundException]
Group: typing.Type[Group]
LastOwnerException: typing.Type[LastOwnerException]
NotOwnerException: typing.Type[NotOwnerException]
Owner: typing.Type[Owner]
Permission: typing.Type[Permission]
| [
"[email protected]"
] | |
55f4632468c9a46223772b259e528fabae433016 | ee6074aad7bef8b8279130fa561b6eb0a6e66b1e | /modules/sample.py | 20032a1959f97a7f6c2401e185e308b900bece59 | [
"MIT"
] | permissive | antiquefu/pycameresp | 23ca208c2a19d445d164fbdc25303d4be040f381 | d86814625a7cd2f7e5fa01b8e1652efc811cef3a | refs/heads/main | 2023-08-01T01:40:04.384937 | 2021-09-19T17:30:48 | 2021-09-19T17:30:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,045 | py | # Distributed under MIT License
# Copyright (c) 2021 Remi BERTHOLET
from server.httpserver import HttpServer
from htmltemplate import Br,ButtonCmd,Option,SwitchCmd,Tag,SliderCmd,ComboCmd,Paragraph
from webpage.mainpage import *
from tools import useful
# Called when the button pressed
@HttpServer.addRoute(b'/sample/button')
async def buttonPressed(request, response, args):
print("Button clicked")
await response.sendOk()
# Called when the slider state changed
@HttpServer.addRoute(b'/sample/slider')
async def sliderChanged(request, response, args):
print("Slider change to %d"%int(request.params[b"value"]))
await response.sendOk()
# Called when the combo state changed
@HttpServer.addRoute(b'/sample/combo')
async def comboChanged(request, response, args):
print("Number %s selected"%useful.tostrings(request.params[b"value"]))
await response.sendOk()
# Called when the switch state changed
@HttpServer.addRoute(b'/sample/switch')
async def switchChanged(request, response, args):
print("Switch change to %s"%useful.tostrings(request.params[b"value"]))
await response.sendOk()
# Test simple page with button
@HttpServer.addRoute(b'/sample', menu=b"Sample", item=b"Sample")
async def samplePage(request, response, args):
page = mainFrame(request, response, args, b"Sample",
Tag(b'''
<p>Example to interact with esp32 via an html page (see the content of file <b>sample.py</b>)</p>
'''),
ButtonCmd(text=b"Click on button", path=b"/sample/button"), Br(),Br(),
SliderCmd(min=b"10", max=b"30", step=b"2", value=b"12", text=b"Move slider", path=b"/sample/slider"), Br(),
ComboCmd(\
[
Option(value=b"One" , text=b"One"),
Option(value=b"Two" , text=b"Two", selected=True),
Option(value=b"Three" , text=b"Three"),
], path=b"/sample/combo", text=b"Select number"), Br(),Br(),
SwitchCmd(text=b"Change this switch", checked=True, path=b"/sample/switch"),
Br(),
Br(),
Paragraph(b"To eliminate this page delete the <b>sample.py</b> file"))
await response.sendPage(page)
| [
"[email protected]"
] | |
3f3cbc27b98cf2f962ea71902a23a95e4451122e | ee6acbd5fcd0fcd16230e96a4a539de41a02c97e | /operators/ibmcloud-iam-operator/python/pulumi_pulumi_kubernetes_crds_operators_ibmcloud_iam_operator/_tables.py | 0e341dd0c30a19d942a6a75299bc8fdb6db597f2 | [
"Apache-2.0"
] | permissive | isabella232/pulumi-kubernetes-crds | 777e78137aaf6525a44b61a02dccf91bf0d87a14 | 372c4c0182f6b899af82d6edaad521aa14f22150 | refs/heads/master | 2023-03-15T04:29:16.039753 | 2020-12-30T19:35:54 | 2020-12-30T19:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,128 | py | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
SNAKE_TO_CAMEL_CASE_TABLE = {
"access_group_def": "accessGroupDef",
"access_group_id": "accessGroupID",
"access_group_name": "accessGroupName",
"access_group_namespace": "accessGroupNamespace",
"api_version": "apiVersion",
"custom_role_name": "customRoleName",
"custom_role_namespace": "customRoleNamespace",
"custom_roles_d_name": "customRolesDName",
"custom_roles_def": "customRolesDef",
"defined_roles": "definedRoles",
"display_name": "displayName",
"group_id": "GroupID",
"policy_id": "policyID",
"resource_group": "resourceGroup",
"resource_id": "resourceID",
"resource_key": "resourceKey",
"resource_name": "resourceName",
"resource_value": "resourceValue",
"role_crn": "roleCRN",
"role_id": "roleID",
"role_name": "roleName",
"service_class": "serviceClass",
"service_i_ds": "serviceIDs",
"service_id": "serviceID",
"user_email": "userEmail",
"user_emails": "userEmails",
}
CAMEL_TO_SNAKE_CASE_TABLE = {
"accessGroupDef": "access_group_def",
"accessGroupID": "access_group_id",
"accessGroupName": "access_group_name",
"accessGroupNamespace": "access_group_namespace",
"apiVersion": "api_version",
"customRoleName": "custom_role_name",
"customRoleNamespace": "custom_role_namespace",
"customRolesDName": "custom_roles_d_name",
"customRolesDef": "custom_roles_def",
"definedRoles": "defined_roles",
"displayName": "display_name",
"GroupID": "group_id",
"policyID": "policy_id",
"resourceGroup": "resource_group",
"resourceID": "resource_id",
"resourceKey": "resource_key",
"resourceName": "resource_name",
"resourceValue": "resource_value",
"roleCRN": "role_crn",
"roleID": "role_id",
"roleName": "role_name",
"serviceClass": "service_class",
"serviceIDs": "service_i_ds",
"serviceID": "service_id",
"userEmail": "user_email",
"userEmails": "user_emails",
}
| [
"[email protected]"
] | |
e5ee33c78f39ec07d8e46db54f732e895ef0f629 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/203.py | 5e6817c4973f210cb1eb8eadcf2f4966ec3e3bb1 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py |
import sys
in_ = sys.stdin
T = int(in_.readline())
for t in xrange(T):
r1 = int(in_.readline())
a1 = [map(int, in_.readline().split(' ')) for i in xrange(4)]
r2 = int(in_.readline())
a2 = [map(int, in_.readline().split(' ')) for i in xrange(4)]
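    # candidate cards = intersection of the row picked in each arrangement:
    # exactly one means a unique answer, several means the magician cannot know,
    # none means the volunteer lied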
res = set(a1[r1 - 1]).intersection(a2[r2 - 1])
prefix = 'Case #%d:' % (t + 1)
if len(res) == 1:
print prefix, list(res)[0]
elif len(res) > 1:
print prefix, 'Bad magician!'
else:
print prefix, 'Volunteer cheated!'
| [
"[email protected]"
] | |
0212152bb1f6064565aed07a3aab1458cca979a3 | b97e1d652346fb3bd82a2eddf714fb4a95330405 | /src/visualize_san_v2.py | d737f8aada0f0f69d129df0b44ac3553093e9bda | [
"MIT"
] | permissive | satwik77/Transformer-Formal-Languages | 565d49a1500e934a573dade04446710b66307852 | 48eea2ea6e2802ba827868723f75fa6c82401cde | refs/heads/main | 2022-12-26T08:14:38.513836 | 2020-10-10T12:17:59 | 2020-10-10T12:17:59 | 301,586,833 | 17 | 8 | null | null | null | null | UTF-8 | Python | false | false | 13,479 | py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from src.utils.dyck_generator import DyckLanguage
from src.utils.helper import *
from src.utils.logger import get_logger
from src.utils.sentence_processing import sents_to_idx
from src.components.transformers import TransformerModel
import pickle
from attrdict import AttrDict
import logging
import os
import seaborn as sns; sns.set()
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import xticks
import pylab
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
import collections
from functools import partial
import ipdb as pdb
all_pairs = ['()', '[]', '{}', '<>', '+-', 'ab', 'xo']
all_open = ['(', '[', '{', '<','+','a','x']
def generate_visualizations(model, config, voc, src_str = '((()))(())()', run_name = 'SAN', iteration = 0,score = 1.0, device = 'cuda:1'):
dlang = DyckLanguage(config.num_par, 0.5, 0.25)
#Convert source string to ids tensor
src = sents_to_idx(voc, [src_str]).transpose(0,1).to(device)[:-1]
#Create directory to save visualizations
dir_path = os.path.join("Figures", run_name)
if os.path.exists(dir_path) == False:
os.mkdir(dir_path)
	# Plotting attention weights
def visualize_attn(src, src_str):
output, attn = model.model(src, get_attns = True)
src_len = len(src_str)
attn_maps = []
attn_map = attn[0][0,:src_len,:src_len].detach().cpu().numpy()
for i in range(config.depth):
for j in range(config.heads):
attn_map = attn[i][0,j,:src_len,:src_len].detach().cpu().numpy()
plt.figure(figsize= (15,10))
sns.set(font_scale = 1.5)
g = sns.heatmap(np.log(attn_map), mask = (attn_map == 0).astype(float),annot=attn_map, cmap = sns.cubehelix_palette(100, start=0.7, rot=-0.5, gamma = 1.5), vmin = -2.6, vmax = 0, cbar = False, xticklabels=list(src_str), yticklabels=list(src_str), linewidths=.5) #cmap="YlGnBu")
yticks = g.set_yticklabels(labels = list(src_str), rotation = 360, size = 30)
xticks = g.set_xticklabels(labels = list(src_str), size = 30)
plt.title('Attention Weights Layer: {} Head: {} (It :{})'.format(i+1, j+1, iteration), size = 20)
fig = g.get_figure()
fig.savefig(os.path.join(dir_path, 'attn_weights_depth-{}_heads-{}_it-{}.png'.format(i+1, j+1, iteration)), bbox_inches='tight')
attn_maps.append(attn_map)
return attn_maps
attn_maps = visualize_attn(src, src_str)
	# Computing and Plotting Intermediate representations
# Obtaining Embeddings
embeddings = model.model.encoder(src) * np.sqrt(config.d_model)
embed_unq = torch.unique(embeddings, dim = 0)
embed_unq = embed_unq.detach().cpu().numpy().squeeze()
#Plotting Embeddings
plt.figure(figsize = (10, 3))
#embed_unq = embeddings_np[[0,3]]
g = sns.heatmap(embed_unq, annot = embed_unq, cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', yticklabels = ['(', ')', '[',']'])
g.set_title('Embeddings (It: {})'.format(iteration))
fig = g.get_figure()
fig.savefig(os.path.join(dir_path, 'embeddings_it-{}.png'.format(iteration)), bbox_inches='tight')
# Computing queries, keys and values
kqv = list(model.model.transformer_encoder.parameters())[0].detach()
b = list(model.model.transformer_encoder.parameters())[1].detach()
query_matrix, query_bias = kqv[:config.d_model], b[:config.d_model]
key_matrix, key_bias = kqv[config.d_model:config.d_model * 2], b[config.d_model:config.d_model * 2]
value_matrix, value_bias = kqv[config.d_model * 2:], b[config.d_model * 2:]
kqv_vectors = torch.mm(embeddings.squeeze(), kqv.transpose(0,1)) + b
queries, keys, values = kqv_vectors[:,:config.d_model], kqv_vectors[:,config.d_model:config.d_model * 2], kqv_vectors[:,config.d_model * 2:]
# Plotting Query Matrix
sns.set(font_scale = 1.2)
query_matrix_np = query_matrix.detach().cpu().numpy().squeeze()
query_bias_np = query_bias.detach().cpu().numpy().squeeze()[:,None]
f,(ax1,ax2, ax3) = plt.subplots(1,3,sharey=False, gridspec_kw={'width_ratios':[6,2, 0.8]}, figsize = (10,5))
#f.tight_layout()
g1 = sns.heatmap(query_matrix_np,annot = query_matrix_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax1, cbar = False)
g1.set_title('Query Matrix (It: {})'.format(iteration))
g2 = sns.heatmap(query_bias_np, annot = query_bias_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax2, cbar_ax = ax3)
g2.set_title('Query Bias (It: {})'.format(iteration))
f.savefig(os.path.join(dir_path, 'query_wb_it-{}.png'.format(iteration)), bbox_inches='tight')
# Plotting Key Matrix
sns.set(font_scale = 1.2)
key_matrix_np = key_matrix.detach().cpu().numpy().squeeze()
key_bias_np = key_bias.detach().cpu().numpy().squeeze()[:,None]
#plt.figure(figsize = (10, 10))
f,(ax1,ax2, ax3) = plt.subplots(1,3,sharey=False, gridspec_kw={'width_ratios':[6,2, 0.8]}, figsize = (10,5))
#f.tight_layout()
g1 = sns.heatmap(key_matrix_np,annot = key_matrix_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax1, cbar = False)
g1.set_title('Key Matrix (It: {})'.format(iteration))
g2 = sns.heatmap(key_bias_np, annot = key_bias_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax2, cbar_ax = ax3)
g2.set_title('Key Bias (It: {})'.format(iteration))
f.savefig(os.path.join(dir_path, 'key_wb_it-{}.png'.format(iteration)), bbox_inches='tight')
	# Plotting Value Matrix
sns.set(font_scale = 1.2)
value_matrix_np = value_matrix.detach().cpu().numpy().squeeze()
value_bias_np = value_bias.detach().cpu().numpy().squeeze()[:,None]
#plt.figure(figsize = (10, 10))
f,(ax1,ax2, ax3) = plt.subplots(1,3,sharey=False, gridspec_kw={'width_ratios':[6,2, 0.8]}, figsize = (10,5))
#f.tight_layout()
g1 = sns.heatmap(value_matrix_np,annot = value_matrix_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax1, cbar = False)
g1.set_title('Value Matrix (It: {})'.format(iteration))
g2 = sns.heatmap(value_bias_np, annot = value_bias_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax2, cbar_ax = ax3)
g2.set_title('Value Bias (It: {})'.format(iteration))
f.savefig(os.path.join(dir_path, 'value_wb_it-{}.png'.format(iteration)), bbox_inches='tight')
#Plotting value vectors
plt.figure(figsize = (10, 3))
values_unq = torch.unique(values, dim = 0)
values_unq = values_unq.detach().cpu().numpy()
#pdb.set_trace()
g = sns.heatmap(values_unq, annot = values_unq, cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', yticklabels = ['(', ')', '[',']'])
g.set_title('Values (It: {})'.format(iteration))
fig = g.get_figure()
fig.savefig(os.path.join(dir_path, 'values_it-{}.png'.format(iteration)), bbox_inches='tight')
# Computing Attention Map
n = len(queries)
mask = torch.tensor(np.triu(np.ones((n,n))).T).float().to(device)
scores = torch.mm(queries, keys.T) / (np.sqrt(3))
scores = scores * mask + (-1e9) * (1 - mask)
attn_map = nn.functional.softmax(scores, dim = -1)
#pdb.set_trace()
#assert np.allclose(attn_map.detach().cpu().numpy(), attn_maps[0])
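	# Worked example of the causal mask built above (explanatory note): for
	# n = 3 the mask is [[1,0,0],[1,1,0],[1,1,1]]; masked logits are pushed
	# to -1e9, so after the softmax each position attends only to itself and
	# to earlier positions (cf. the commented-out allclose check above).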
# Computing attention outputs
attn_outs = torch.mm(attn_map.float(), values.float())
	# Plotting attention outputs
seq = src_str
depths = dlang.depth_counter(seq).squeeze()
lens = np.array([i+1 for i in range(len(seq))])
dlratios = [depths[:,i]/lens for i in range(depths.shape[1])]
sns.set(font_scale = 3 ,style = 'ticks', rc={"lines.linewidth": 4})
src_chars = src_str
src_charsv0 = list(src_chars)
src_chars = ['{}_{}'.format(ch,i) for i,ch in enumerate(src_chars)]
attn_values = attn_outs.detach().cpu().numpy()
data = pd.DataFrame([src_chars, attn_values]).transpose()
data.columns = ['dyck', '0-Element']
fig = plt.figure(figsize = (25, 10))
plt.plot(src_chars, attn_values[:,0], marker = 'o', label = 'Coordinate-0', markersize = 12, color = 'r')
plt.plot(src_chars, attn_values[:,1], marker = 'D', label = 'Coordinate-1', markersize = 12, color = 'm')
plt.plot(src_chars, attn_values[:,2], marker = 'v', label = 'Coordinate-2', markersize = 12, color = 'g')
for i,dlratio in enumerate(dlratios):
plt.plot(src_chars, dlratio,'--', marker = 's', markersize = 12, color = 'c', label = '{} DL Ratio'.format(all_open[i]))
plt.legend(loc="upper right")
plt.title("Output of Self-Attention Block (It: {})".format(iteration))
plt.grid()
plt.rc('grid', linestyle="-", color='black')
plt.savefig(os.path.join(dir_path, 'attn_outs-{}.png'.format(iteration)), bbox_inches='tight')
# Computing outputs on applying a linear layer on attention outputs
attn_ffn_w = list(model.model.transformer_encoder.parameters())[2].detach()
attn_ffn_b = list(model.model.transformer_encoder.parameters())[3].detach()
attn_ffn = torch.mm(attn_outs, attn_ffn_w.transpose(0,1)) + attn_ffn_b
sns.set(font_scale = 1.2)
attn_ffn_w_np = attn_ffn_w.detach().cpu().numpy().squeeze()
attn_ffn_b_np = attn_ffn_b.detach().cpu().numpy().squeeze()[:,None]
#plt.figure(figsize = (10, 10))
f,(ax1,ax2, ax3) = plt.subplots(1,3,sharey=False, gridspec_kw={'width_ratios':[6,2, 0.8]}, figsize = (10,5))
f.tight_layout()
g1 = sns.heatmap(attn_ffn_w_np,annot = attn_ffn_w_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax1, cbar = False)
g1.set_title('Attn-FFN Matrix (It: {})'.format(iteration))
g2 = sns.heatmap(attn_ffn_b_np, annot = attn_ffn_b_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax2, cbar_ax = ax3)
	g2.set_title('Attn-FFN Bias (It: {})'.format(iteration))
f.savefig(os.path.join(dir_path, 'attnffn_wb_it-{}.png'.format(iteration)), bbox_inches='tight')
# Last few important layers
ln1 = model.model.transformer_encoder.layers[0].norm1
ln2 = model.model.transformer_encoder.layers[0].norm2
linear1 = model.model.transformer_encoder.layers[0].linear1
linear2 = model.model.transformer_encoder.layers[0].linear2
try:
activation = model.model.transformer_encoder.layers[0].activation
except:
activation = F.relu
# Feeding attn_ffn obtained in the last cell to residual and layer norm layers
res_out = embeddings.squeeze() + attn_ffn
res_ln_out = ln1(res_out)
# Applying a feed forward network (d_model -> d_ffn -> d_model) to the resulting output from last set
pos_ffn = linear2(activation(linear1(res_ln_out)))
# Applying residual + layer norm to the vectors obtained from last step
res_out2 = (res_ln_out + pos_ffn)
res_ln_out2 = ln2(res_out2)
pos_ffn1_w = list(linear1.parameters())[0]
pos_ffn1_b = list(linear1.parameters())[1]
#Plotting Pos_FFN-1 Weights
sns.set(font_scale = 1.2)
pos_ffn1_w_np = pos_ffn1_w.detach().cpu().numpy().squeeze()
pos_ffn1_b_np = pos_ffn1_b.detach().cpu().numpy().squeeze()[:,None]
#plt.figure(figsize = (10, 10))
f,(ax1,ax2, ax3) = plt.subplots(1,3,sharey=False, gridspec_kw={'width_ratios':[6,2, 0.4]}, figsize = (10,5))
#f.tight_layout()
g1 = sns.heatmap(pos_ffn1_w_np,annot = pos_ffn1_w_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax1, cbar = False)
g1.set_title('Pos-FFN Layer-1 Matrix (It: {})'.format(iteration))
g2 = sns.heatmap(pos_ffn1_b_np, annot = pos_ffn1_b_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax2, cbar_ax = ax3)
g2.set_title('Pos-FFN Layer-1 Bias (It: {})'.format(iteration))
f.savefig(os.path.join(dir_path, 'posffn1_wb_it-{}.png'.format(iteration)), bbox_inches='tight')
pos_ffn2_w = list(linear2.parameters())[0]
pos_ffn2_b = list(linear2.parameters())[1]
	#Plotting Pos_FFN-2 Weights
sns.set(font_scale = 1.2)
pos_ffn2_w_np = pos_ffn2_w.detach().cpu().numpy().squeeze()
pos_ffn2_b_np = pos_ffn2_b.detach().cpu().numpy().squeeze()[:,None]
#plt.figure(figsize = (10, 10))
f,(ax1,ax2, ax3) = plt.subplots(1,3,sharey=False, gridspec_kw={'width_ratios':[6,2, 0.4]}, figsize = (10,5))
#f.tight_layout()
g1 = sns.heatmap(pos_ffn2_w_np,annot = pos_ffn2_w_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax1, cbar = False)
	g1.set_title('Pos-FFN Layer-2 Matrix (It: {})'.format(iteration))
g2 = sns.heatmap(pos_ffn2_b_np, annot = pos_ffn2_b_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', ax = ax2, cbar_ax = ax3)
g2.set_title('Pos-FFN Layer-2 Bias (It: {})'.format(iteration))
f.savefig(os.path.join(dir_path, 'posffn2_wb_it-{}.png'.format(iteration)), bbox_inches='tight')
# Feeding the encoder representations (obtained above) to the output linear layer (called decoder)
decoder_weights = list(model.model.decoder.parameters())[0].detach()
decoder_reps = torch.mm(res_ln_out2, decoder_weights.T)
sns.set(font_scale = 1.2)
decoder_w_np = decoder_weights.detach().cpu().numpy().squeeze()
plt.figure(figsize = (10, 5))
#f,(ax1,ax2, ax3) = plt.subplots(1,3,sharey=False, gridspec_kw={'width_ratios':[6,2, 0.8]}, figsize = (10,5))
#f.tight_layout()
g1 = sns.heatmap(decoder_w_np,annot = decoder_w_np.round(3), cmap = sns.color_palette("coolwarm", 7),
linewidth=1.5, linecolor = 'black', cbar = True)
g1.set_title('Decoder Weights (It: {})'.format(iteration))
plt.savefig(os.path.join(dir_path, 'decoder_wb_it-{}.png'.format(iteration)), bbox_inches='tight')
model.to(device)
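# Hypothetical invocation sketch (loading of model/config/voc lives in the
# surrounding training code and is assumed here, not defined in this file):
#   voc, config, model = ...  # vocabulary, AttrDict of hyperparams, wrapper exposing .model
#   generate_visualizations(model, config, voc, src_str='(())()',
#                           run_name='SAN-dyck', iteration=1000, device='cuda:0')
# Figures are then written under Figures/<run_name>/ as constructed above.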
| [
"[email protected]"
] | |
1cef9ac509fda187576f823cea484153218edfac | a00a5d4b0c280e9bd3de5be38bcca0c6adf92971 | /zaza/openstack/utilities/swift.py | 67daca04372ee45e1323472a742f3417babace40 | [
"Apache-2.0"
] | permissive | jguedez/zaza-openstack-tests | ea9e02c5669668651eb911f1a8d6cd775d3b07ad | 49a01c8318078ea262bd61eddc5241fd8a878315 | refs/heads/master | 2020-09-25T12:11:14.411917 | 2019-12-11T19:33:03 | 2019-12-11T19:33:03 | 226,002,576 | 0 | 0 | NOASSERTION | 2019-12-05T02:52:10 | 2019-12-05T02:52:09 | null | UTF-8 | Python | false | false | 10,407 | py | # Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Swift utilities."""
import logging
import uuid
import zaza.model
import zaza.openstack.utilities.juju as juju_utils
class ObjectReplica:
"""A replica of an object.
The replica attributes show the location of an object replica.
server: IP address or hostname of machine hosting replica
port: Port of swift object server running on machine hosting replica
device: Path to device hosting replica
    handoff_device: Whether this is a handoff device. Handoff devices pass
the replica on to a remote storage node.
"""
def __init__(self, raw_line):
"""Extract storage info from text."""
rl = raw_line.split()
self.server, self.port = rl[2].split(':')
self.device = rl[3]
self.handoff_device = rl[-1] == '[Handoff]'
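# Example (the swift-get-nodes line format is assumed, matching the parse in
# ObjectReplica above):
#   "Server:Port Device      10.5.0.1:6000 sdb1"
# yields server='10.5.0.1', port='6000', device='sdb1', handoff_device=False;
# a trailing '[Handoff]' token marks a handoff device instead.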
class ObjectReplicas:
"""Replicas of an object."""
def __init__(self, proxy_app, account, container_name, object_name,
storage_topology, model_name=None):
"""Find all replicas of given object.
:param proxy_app: Name of proxy application
:type proxy_app: str
:param account: Account that owns the container.
:type account: str
:param container_name: Name of container that contains the object.
:type container_name: str
:param object_name: Name of object.
:type object_name: str
:param storage_topology: Dictionary keyed on IP of storage node info.
:type storage_topology: {}
:param model_name: Model to point environment at
:type model_name: str
"""
self.replicas = []
self.replica_placements = {}
self.storage_topology = storage_topology
raw_output = self.run_get_nodes(
proxy_app,
account,
container_name,
object_name,
model_name=model_name)
for line in self.extract_storage_lines(raw_output):
self.add_replica(line)
def add_replica(self, storage_line):
"""Add a replica to the replica set."""
self.replicas.append(ObjectReplica(storage_line))
def extract_storage_lines(self, raw_output):
"""Extract replica list from output of swift-get-nodes.
:param storage_line: Output of swift-get-nodes
:type storage_line: str
:returns: List of lines relating to replicas.
:rtype: [str, ...]
"""
storage_lines = []
for line in raw_output.split('\n'):
if line.startswith('Server:Port '):
storage_lines.append(line)
return storage_lines
def run_get_nodes(self, proxy_app, account, container_name, object_name,
model_name=None):
"""Run swift-get-nodes for an object on a proxy unit.
:param proxy_app: Name of proxy application
:type proxy_app: str
:param account: Account that owns the container.
:type account: str
:param container_name: Name of container that contains the object.
:type container_name: str
:param object_name: Name of object.
:type object_name: str
:param model_name: Model to point environment at
:type model_name: str
:returns: Stdout of command
:rtype: str
"""
ring_file = '/etc/swift/object.ring.gz'
obj_cmd = "swift-get-nodes -a {} {} {} {}".format(
ring_file,
account,
container_name,
object_name)
cmd_result = zaza.model.run_on_leader(
proxy_app,
obj_cmd,
model_name=model_name)
return cmd_result['Stdout']
@property
def hand_off_ips(self):
"""Replicas which are marked as handoff devices.
        These are not real replicas. They hand off the replica to other nodes.
        :returns: List of IPs of handoff nodes for the object.
:rtype: List[str]
"""
return [r.server for r in self.replicas if r.handoff_device]
@property
def storage_ips(self):
"""Ip addresses of nodes that are housing a replica.
        :returns: List of IPs of storage nodes holding a replica of the object.
:rtype: [str, ...]
"""
return [r.server for r in self.replicas if not r.handoff_device]
@property
def placements(self):
"""Region an zone information for each replica.
Zone info is in the form:
[{
'app_name': str,
'unit': juju.Unit,
'region': int,
'zone': int}, ...]
:returns: List of dicts with region and zone information.
:rtype: List[Dict[str, Union[str,int]]]
"""
return [self.storage_topology[ip] for ip in self.storage_ips]
@property
def distinct_regions(self):
"""List of distinct regions that have a replica.
:returns: List of regions that have a replica
:rtype: [int, ...]
"""
return list(set([p['region'] for p in self.placements]))
@property
def all_zones(self):
"""List of all zones that have a replica.
:returns: List of tuples (region, zone) that have a replica.
:rtype: List[Tuple[str, str]]
"""
return [(p['region'], p['zone']) for p in self.placements]
@property
def distinct_zones(self):
"""List of distinct region + zones that have a replica.
:returns: List of tuples (region, zone) that have a replica.
:rtype: [(r1, z1), ...]
"""
return list(set(self.all_zones))
def get_swift_storage_topology(model_name=None):
"""Get details of storage nodes and which region and zones they belong in.
:param model_name: Model to point environment at
:type model_name: str
:returns: Dictionary of storage nodes and their region/zone information.
:rtype: {
'ip (str)': {
'app_name': str,
'unit': juju.Unit
'region': int,
'zone': int},
...}
"""
topology = {}
status = juju_utils.get_full_juju_status(model_name=model_name)
for app_name, app_dep_config in status.applications.items():
if 'swift-storage' in app_dep_config['charm']:
app_config = zaza.model.get_application_config(
app_name,
model_name=model_name)
region = app_config['region']['value']
zone = app_config['zone']['value']
for unit in zaza.model.get_units(app_name, model_name=model_name):
topology[unit.public_address] = {
'app_name': app_name,
'unit': unit,
'region': region,
'zone': zone}
return topology
def setup_test_container(swift_client, resource_prefix):
"""Create a swift container for use be tests.
:param swift_client: Swift client to use for object creation
:type swift_client: swiftclient.Client
:returns: (container_name, account_name) Container name and account
name for new container
:rtype: Tuple[str, str]
"""
run_id = str(uuid.uuid1()).split('-')[0]
container_name = '{}-{}-container'.format(resource_prefix, run_id)
swift_client.put_container(container_name)
resp_headers, containers = swift_client.get_account()
account = resp_headers['x-account-project-domain-id']
return container_name, account
def apply_proxy_config(proxy_app, config, model_name=None):
"""Update the give proxy_app with new charm config.
:param proxy_app: Name of proxy application
:type proxy_app: str
:param config: Dictionary of configuration setting(s) to apply
:type config: dict
:param model_name: Name of model to query.
:type model_name: str
"""
current_config = zaza.model.get_application_config(
proxy_app,
model_name=model_name)
# Although there is no harm in applying config that is a noop it
# does affect the expected behaviour afterwards. So, only apply
# genuine changes so we can safely expect the charm to fire a hook.
for key, value in config.items():
if str(config[key]) != str(current_config[key]['value']):
break
else:
logging.info(
'Config update for {} not required.'.format(proxy_app))
return
logging.info('Updating {} charm settings'.format(proxy_app))
zaza.model.set_application_config(
proxy_app,
config,
model_name=model_name)
zaza.model.block_until_all_units_idle()
def create_object(swift_client, proxy_app, storage_topology, resource_prefix,
model_name=None):
"""Create a test object in a new container.
:param swift_client: Swift client to use for object creation
:type swift_client: swiftclient.Client
:param proxy_app: Name of proxy application
:type proxy_app: str
:param storage_topology: Dictionary keyed on IP of storage node info.
:type storage_topology: {}
:param resource_prefix: Prefix to use when naming new resources
:type resource_prefix: str
:param model_name: Model to point environment at
:type model_name: str
:returns: (container_name, object_name, object replicas)
:rtype: (str, str, ObjectReplicas)
"""
container_name, account = setup_test_container(
swift_client,
resource_prefix)
object_name = 'zaza_test_object.txt'
swift_client.put_object(
container_name,
object_name,
contents='File contents',
content_type='text/plain'
)
obj_replicas = ObjectReplicas(
proxy_app,
account,
container_name,
object_name,
storage_topology,
model_name=model_name)
return container_name, object_name, obj_replicas
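# A hypothetical end-to-end flow tying these helpers together (the
# swiftclient connection and the 'swift-proxy' application name are assumed):
#   topology = get_swift_storage_topology()
#   container, obj, replicas = create_object(
#       swift_client, 'swift-proxy', topology, 'zaza')
#   assert len(replicas.distinct_zones) >= 1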
| [
"[email protected]"
] | |
50c09f69645df647c57d349e755ae1932eb71d95 | f07392633118f7f6aff0a5a9b2a5c9eaab1a0299 | /Examples/debugging/wikidef/html2text.py | 1540a7b91357475a72d9628c762fce825fdec14c | [] | no_license | UWPCE-PythonCert/Py300 | afc4abca736cfea031292db6bed996465f37604f | 7f93d20ae66ba9a56c4dcc0c1fdafcf79db15349 | refs/heads/master | 2020-05-26T13:43:38.098926 | 2018-03-05T07:11:48 | 2018-03-05T07:11:48 | 85,002,542 | 4 | 7 | null | null | null | null | UTF-8 | Python | false | false | 26,163 | py | #!/usr/bin/env python3
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "3.1"
__author__ = "Aaron Swartz ([email protected])"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decoded entities with unifiable.
try:
True
except NameError:
setattr(__builtins__, 'True', 1)
setattr(__builtins__, 'False', 0)
def has_key(x, y):
if hasattr(x, 'has_key'): return x.has_key(y)
else: return y in x
try:
import htmlentitydefs
import urlparse
import HTMLParser
except ImportError: #Python3
import html.entities as htmlentitydefs
import urllib.parse as urlparse
import html.parser as HTMLParser
try: #Python3
import urllib.request as urllib
except:
import urllib
import optparse, re, sys, codecs, types
try: from textwrap import wrap
except: pass
# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = 0
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78
# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True
# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True
# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36
IGNORE_ANCHORS = False
IGNORE_IMAGES = False
### Entity Nonsense ###
def name2cp(k):
if k == 'apos': return ord("'")
if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
else:
k = htmlentitydefs.entitydefs[k]
if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
return ord(codecs.latin_1_decode(k)[0])
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
'lrm':'', 'rlm':''}
unifiable_n = {}
for k in unifiable.keys():
unifiable_n[name2cp(k)] = unifiable[k]
def charref(name):
if name[0] in ['x','X']:
c = int(name[1:], 16)
else:
c = int(name)
if not UNICODE_SNOB and c in unifiable_n.keys():
return unifiable_n[c]
else:
try:
return unichr(c)
except NameError: #Python3
return chr(c)
def entityref(c):
if not UNICODE_SNOB and c in unifiable.keys():
return unifiable[c]
else:
try: name2cp(c)
except KeyError: return "&" + c + ';'
else:
try:
return unichr(name2cp(c))
except NameError: #Python3
return chr(name2cp(c))
def replaceEntities(s):
s = s.group(1)
if s[0] == "#":
return charref(s[1:])
else: return entityref(s)
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(s):
return r_unescape.sub(replaceEntities, s)
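# e.g. unescape("AT&amp;T") -> "AT&T"; a numeric reference such as "&#8217;"
# resolves via charref and, with UNICODE_SNOB unset, folds to the ASCII "'".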
### End Entity Nonsense ###
def onlywhite(line):
"""Return true if the line does only consist of whitespace characters."""
for c in line:
        if c != ' ' and c != '\t':
            return c == ' '
return line
def optwrap(text):
"""Wrap all paragraphs in the provided text."""
if not BODY_WIDTH:
return text
assert wrap, "Requires Python 2.3."
result = ''
newlines = 0
for para in text.split("\n"):
if len(para) > 0:
if para[0] != ' ' and para[0] != '-' and para[0] != '*':
for line in wrap(para, BODY_WIDTH):
result += line + "\n"
result += "\n"
newlines = 2
else:
if not onlywhite(para):
result += para + "\n"
newlines = 1
else:
if newlines < 2:
result += "\n"
newlines += 1
return result
def hn(tag):
if tag[0] == 'h' and len(tag) == 2:
try:
n = int(tag[1])
if n in range(1, 10): return n
except ValueError: return 0
def dumb_property_dict(style):
"""returns a hash of css attributes"""
return dict([(x.strip(), y.strip()) for x, y in [z.split(':', 1) for z in style.split(';') if ':' in z]]);
def dumb_css_parser(data):
"""returns a hash of css selectors, each of which contains a hash of css attributes"""
# remove @import sentences
importIndex = data.find('@import')
while importIndex != -1:
data = data[0:importIndex] + data[data.find(';', importIndex) + 1:]
importIndex = data.find('@import')
    # parse the css. reverted from dictionary comprehension in order to support older pythons
elements = [x.split('{') for x in data.split('}') if '{' in x.strip()]
elements = dict([(a.strip(), dumb_property_dict(b)) for a, b in elements])
return elements
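# Illustrative call: dumb_css_parser("h1 { color: red }") returns
# {'h1': {'color': 'red'}}; @import sentences are stripped first.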
def element_style(attrs, style_def, parent_style):
"""returns a hash of the 'final' style attributes of the element"""
style = parent_style.copy()
if 'class' in attrs:
for css_class in attrs['class'].split():
css_style = style_def['.' + css_class]
style.update(css_style)
if 'style' in attrs:
immediate_style = dumb_property_dict(attrs['style'])
style.update(immediate_style)
return style
def google_list_style(style):
"""finds out whether this is an ordered or unordered list"""
if 'list-style-type' in style:
list_style = style['list-style-type']
if list_style in ['disc', 'circle', 'square', 'none']:
return 'ul'
return 'ol'
def google_nest_count(style):
"""calculate the nesting count of google doc lists"""
nest_count = 0
if 'margin-left' in style:
nest_count = int(style['margin-left'][:-2]) / GOOGLE_LIST_INDENT
return nest_count
def google_has_height(style):
"""check if the style of the element has the 'height' attribute explicitly defined"""
if 'height' in style:
return True
return False
def google_text_emphasis(style):
"""return a list of all emphasis modifiers of the element"""
emphasis = []
if 'text-decoration' in style:
emphasis.append(style['text-decoration'])
if 'font-style' in style:
emphasis.append(style['font-style'])
if 'font-weight' in style:
emphasis.append(style['font-weight'])
return emphasis
def google_fixed_width_font(style):
"""check if the css of the current element defines a fixed width font"""
font_family = ''
if 'font-family' in style:
font_family = style['font-family']
if 'Courier New' == font_family or 'Consolas' == font_family:
return True
return False
def list_numbering_start(attrs):
"""extract numbering from list element attributes"""
if 'start' in attrs:
return int(attrs['start']) - 1
else:
return 0
class _html2text(HTMLParser.HTMLParser):
def __init__(self, out=None, baseurl=''):
HTMLParser.HTMLParser.__init__(self)
if out is None: self.out = self.outtextf
else: self.out = out
self.outtextlist = [] # empty list to store output characters before they are "joined"
try:
self.outtext = unicode()
except NameError: # Python3
self.outtext = str()
self.quiet = 0
self.p_p = 0 # number of newline character to print before next output
self.outcount = 0
self.start = 1
self.space = 0
self.a = []
self.astack = []
self.acount = 0
self.list = []
self.blockquote = 0
self.pre = 0
self.startpre = 0
self.code = False
self.br_toggle = ''
self.lastWasNL = 0
self.lastWasList = False
self.style = 0
self.style_def = {}
self.tag_stack = []
self.emphasis = 0
self.drop_white_space = 0
self.inheader = False
self.abbr_title = None # current abbreviation definition
self.abbr_data = None # last inner HTML (for abbr being defined)
self.abbr_list = {} # stack of abbreviations to write later
self.baseurl = baseurl
if options.google_doc:
del unifiable_n[name2cp('nbsp')]
unifiable['nbsp'] = ' _place_holder;'
def feed(self, data):
data = data.replace("</' + 'script>", "</ignore>")
HTMLParser.HTMLParser.feed(self, data)
def outtextf(self, s):
self.outtextlist.append(s)
if s: self.lastWasNL = s[-1] == '\n'
def close(self):
HTMLParser.HTMLParser.close(self)
self.pbr()
self.o('', 0, 'end')
self.outtext = self.outtext.join(self.outtextlist)
if options.google_doc:
self.outtext = self.outtext.replace(' _place_holder;', ' ');
return self.outtext
def handle_charref(self, c):
self.o(charref(c), 1)
def handle_entityref(self, c):
self.o(entityref(c), 1)
def handle_starttag(self, tag, attrs):
self.handle_tag(tag, attrs, 1)
def handle_endtag(self, tag):
self.handle_tag(tag, None, 0)
def previousIndex(self, attrs):
""" returns the index of certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None
"""
if not has_key(attrs, 'href'): return None
i = -1
for a in self.a:
i += 1
match = 0
if has_key(a, 'href') and a['href'] == attrs['href']:
if has_key(a, 'title') or has_key(attrs, 'title'):
if (has_key(a, 'title') and has_key(attrs, 'title') and
a['title'] == attrs['title']):
match = True
else:
match = True
if match: return i
def drop_last(self, nLetters):
if not self.quiet:
self.outtext = self.outtext[:-nLetters]
def handle_emphasis(self, start, tag_style, parent_style):
"""handles various text emphases"""
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in tag_emphasis and options.hide_strikethrough
bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
# crossed-out text must be handled before other attributes
# in order not to output qualifiers unnecessarily
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o("_")
self.drop_white_space += 1
if bold:
self.o("**")
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
# there must not be whitespace before closing emphasis mark
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o("**")
if italic:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o("_")
# space is only allowed after *all* emphasis marks
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1
def handle_tag(self, tag, attrs, start):
#attrs = fixattrs(attrs)
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
if options.google_doc:
# the attrs parameter is empty for a closing tag. in addition, we
# need the attributes of the parent nodes in order to get a
# complete style description for the current element. we assume
# that google docs export well formed html.
parent_style = {}
if start:
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
tag_style = element_style(attrs, self.style_def, parent_style)
self.tag_stack.append((tag, attrs, tag_style))
else:
dummy, attrs, tag_style = self.tag_stack.pop()
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
if hn(tag):
self.p()
if start:
self.inheader = True
self.o(hn(tag)*"#" + ' ')
else:
self.inheader = False
return # prevent redundant emphasis marks on headers
if tag in ['p', 'div']:
if options.google_doc:
if start and google_has_height(tag_style):
self.p()
else:
self.soft_br()
else:
self.p()
if tag == "br" and start: self.o(" \n")
if tag == "hr" and start:
self.p()
self.o("* * *")
self.p()
if tag in ["head", "style", 'script']:
if start: self.quiet += 1
else: self.quiet -= 1
if tag == "style":
if start: self.style += 1
else: self.style -= 1
if tag in ["body"]:
self.quiet = 0 # sites like 9rules.com never close <head>
if tag == "blockquote":
if start:
self.p(); self.o('> ', 0, 1); self.start = 1
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
if tag in ['em', 'i', 'u']: self.o("_")
if tag in ['strong', 'b']: self.o("**")
if tag in ['del', 'strike']:
if start:
self.o("<"+tag+">")
else:
self.o("</"+tag+">")
if options.google_doc:
if not self.inheader:
# handle some font attributes, but leave headers clean
self.handle_emphasis(start, tag_style, parent_style)
if tag == "code" and not self.pre: self.o('`') #TODO: `` `this` ``
if tag == "abbr":
if start:
self.abbr_title = None
self.abbr_data = ''
if has_key(attrs, 'title'):
self.abbr_title = attrs['title']
else:
if self.abbr_title != None:
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = ''
if tag == "a" and not IGNORE_ANCHORS:
if start:
if has_key(attrs, 'href') and not (SKIP_INTERNAL_LINKS and attrs['href'].startswith('#')):
self.astack.append(attrs)
self.o("[")
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if a:
if INLINE_LINKS:
self.o("](" + a['href'] + ")")
else:
i = self.previousIndex(a)
if i is not None:
a = self.a[i]
else:
self.acount += 1
a['count'] = self.acount
a['outcount'] = self.outcount
self.a.append(a)
self.o("][" + str(a['count']) + "]")
if tag == "img" and start and not IGNORE_IMAGES:
if has_key(attrs, 'src'):
attrs['href'] = attrs['src']
alt = attrs.get('alt', '')
if INLINE_LINKS:
self.o("")
else:
i = self.previousIndex(attrs)
if i is not None:
attrs = self.a[i]
else:
self.acount += 1
attrs['count'] = self.acount
attrs['outcount'] = self.outcount
self.a.append(attrs)
self.o("![")
self.o(alt)
self.o("]["+ str(attrs['count']) +"]")
if tag == 'dl' and start: self.p()
if tag == 'dt' and not start: self.pbr()
if tag == 'dd' and start: self.o(' ')
if tag == 'dd' and not start: self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
if (not self.list) and (not self.lastWasList):
self.p()
if start:
if options.google_doc:
list_style = google_list_style(tag_style)
else:
list_style = tag
numbering_start = list_numbering_start(attrs)
self.list.append({'name':list_style, 'num':numbering_start})
else:
if self.list: self.list.pop()
self.lastWasList = True
else:
self.lastWasList = False
if tag == 'li':
self.pbr()
if start:
if self.list: li = self.list[-1]
else: li = {'name':'ul', 'num':0}
if options.google_doc:
nest_count = google_nest_count(tag_style)
else:
nest_count = len(self.list)
self.o(" " * nest_count) #TODO: line up <ol><li>s > 9 correctly.
if li['name'] == "ul": self.o(options.ul_item_mark + " ")
elif li['name'] == "ol":
li['num'] += 1
self.o(str(li['num'])+". ")
self.start = 1
if tag in ["table", "tr"] and start: self.p()
if tag == 'td': self.pbr()
if tag == "pre":
if start:
self.startpre = 1
self.pre = 1
else:
self.pre = 0
self.p()
def pbr(self):
if self.p_p == 0: self.p_p = 1
def p(self): self.p_p = 2
def soft_br(self):
self.pbr()
self.br_toggle = ' '
def o(self, data, puredata=0, force=0):
if self.abbr_data is not None: self.abbr_data += data
if not self.quiet:
if options.google_doc:
# prevent white space immediately after 'begin emphasis' marks ('**' and '_')
lstripped_data = data.lstrip()
if self.drop_white_space and not (self.pre or self.code):
data = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if puredata and not self.pre:
data = re.sub('\s+', ' ', data)
if data and data[0] == ' ':
self.space = 1
data = data[1:]
if not data and not force: return
if self.startpre:
#self.out(" :") #TODO: not output when already one there
self.startpre = 0
bq = (">" * self.blockquote)
if not (force and data and data[0] == ">") and self.blockquote: bq += " "
if self.pre:
bq += " "
data = data.replace("\n", "\n"+bq)
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if force == 'end':
# It's the end.
self.p_p = 0
self.out("\n")
self.space = 0
if self.p_p:
self.out((self.br_toggle+'\n'+bq)*self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL: self.out(' ')
self.space = 0
if self.a and ((self.p_p == 2 and LINKS_EACH_PARAGRAPH) or force == "end"):
if force == "end": self.out("\n")
newa = []
for link in self.a:
if self.outcount > link['outcount']:
self.out(" ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
if has_key(link, 'title'): self.out(" ("+link['title']+")")
self.out("\n")
else:
newa.append(link)
if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
self.a = newa
if self.abbr_list and force == "end":
for abbr, definition in self.abbr_list.items():
self.out(" *[" + abbr + "]: " + definition + "\n")
self.p_p = 0
self.out(data)
self.outcount += 1
def handle_data(self, data):
if r'\/script>' in data: self.quiet -= 1
if self.style:
self.style_def.update(dumb_css_parser(data))
self.o(data, 1)
def unknown_decl(self, data): pass
def wrapwrite(text):
text = text.encode('utf-8')
try: #Python3
sys.stdout.buffer.write(text)
except AttributeError:
sys.stdout.write(text)
def html2text_file(html, out=wrapwrite, baseurl=''):
h = _html2text(out, baseurl)
h.feed(html)
h.feed("")
return h.close()
def html2text(html, baseurl=''):
return optwrap(html2text_file(html, None, baseurl))
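# Minimal usage sketch (output shown is approximate):
#   html2text('<h1>Title</h1><p>Hello <b>world</b>!</p>')
# returns Markdown along the lines of:
#   # Title
#
#   Hello **world**!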
class Storage: pass
options = Storage()
options.google_doc = False
options.ul_item_mark = '*'
if __name__ == "__main__":
baseurl = ''
p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
version='%prog ' + __version__)
p.add_option("-g", "--google-doc", action="store_true", dest="google_doc",
default=False, help="convert an html-exported Google Document")
p.add_option("-d", "--dash-unordered-list", action="store_true", dest="ul_style_dash",
default=False, help="use a dash rather than a star for unordered list items")
p.add_option("-b", "--body-width", dest="body_width", action="store", type="int",
default=78, help="number of characters per output line, 0 for no wrap")
p.add_option("-i", "--google-list-indent", dest="list_indent", action="store", type="int",
default=GOOGLE_LIST_INDENT, help="number of pixels Google indents nested lists")
p.add_option("-s", "--hide-strikethrough", action="store_true", dest="hide_strikethrough",
default=False, help="hide strike-through text. only relevent when -g is specified as well")
(options, args) = p.parse_args()
# handle options
if options.ul_style_dash:
options.ul_item_mark = '-'
else:
options.ul_item_mark = '*'
BODY_WIDTH = options.body_width
GOOGLE_LIST_INDENT = options.list_indent
# process input
if len(args) > 0:
file_ = args[0]
encoding = None
if len(args) == 2:
encoding = args[1]
if len(args) > 2:
p.error('Too many arguments')
if file_.startswith('http://') or file_.startswith('https://'):
baseurl = file_
j = urllib.urlopen(baseurl)
text = j.read()
if encoding is None:
try:
from feedparser import _getCharacterEncoding as enc
except ImportError:
enc = lambda x, y: ('utf-8', 1)
encoding = enc(j.headers, text)[0]
if encoding == 'us-ascii':
encoding = 'utf-8'
data = text.decode(encoding)
else:
data = open(file_, 'rb').read()
if encoding is None:
try:
from chardet import detect
except ImportError:
detect = lambda x: {'encoding': 'utf-8'}
encoding = detect(data)['encoding']
data = data.decode(encoding)
else:
data = sys.stdin.read()
wrapwrite(html2text(data, baseurl))
| [
"[email protected]"
] | |
dbfd8897256d294f2d33a2b3cbedbd18f900bc80 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-6963.py | 1c234b97e7345024f5b01b294817698a1358d75f | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,291 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
90218a595c5ced9205138e11fb5e03416aa92719 | fe75ab418adfd723f48b8eafc80515c9fd913395 | /LeetCode/!0232. Implement Queue using Stacks.py | 7230a81dac20ba8732640c59c0492c589d73a031 | [] | no_license | AshkenSC/Programming-Practice | d029e9d901f51ef750ed4089f10c1f16783d2695 | 98e20c63ce1590deda6761ff2f9c8c37f3fb3c4a | refs/heads/master | 2021-07-20T06:41:12.673248 | 2021-06-25T15:44:06 | 2021-06-25T15:44:06 | 127,313,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | '''
0232. Implement Queue using Stacks
Implement a first-in, first-out (FIFO) queue using only two stacks. The queue should support all standard queue operations (push, pop, peek, empty).
'''
class MyQueue:
def __init__(self):
"""
Initialize your data structure here.
"""
self.stk = []
self.temp = []
self.current_queue = 0
def push(self, x: int) -> None:
"""
Push element x to the back of queue.
        1) Push onto stk
"""
self.stk.append(x)
def pop(self) -> int:
"""
Removes the element from in front of queue and returns that element.
        1) Pop every element of stk onto temp
        2) Pop temp once (that element is the queue front)
        3) Push the remaining temp elements back onto stk; temp ends up empty
"""
while len(self.stk) > 0:
self.temp.append(self.stk.pop())
top = self.temp.pop()
while len(self.temp) > 0:
cur = self.temp.pop()
self.stk.append(cur)
return top
def peek(self) -> int:
"""
Get the front element.
        1) Pop every element of stk onto temp
        2) Read the top of temp once (the queue front)
        3) Push the temp elements back onto stk
        4) temp ends up empty
"""
while len(self.stk) > 0:
self.temp.append(self.stk.pop())
top = self.temp[-1]
while len(self.temp) > 0:
self.stk.append(self.temp.pop())
return top
def empty(self) -> bool:
"""
Returns whether the queue is empty.
        1) Check len(stk)
"""
return len(self.stk) < 1
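# Minimal usage sketch (my addition, mirroring the LeetCode driver):
if __name__ == "__main__":
    queue = MyQueue()
    queue.push(1)
    queue.push(2)
    assert queue.peek() == 1   # front of the queue
    assert queue.pop() == 1    # removes the front
    assert not queue.empty()   # 2 is still queued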
| [
"[email protected]"
] | |
7476d47d2554a91d4a8cb9c7d91816e908594d7e | 830a0667f2e70177e83ef394bce9972533ea449c | /arrayMaxConsecutiveSum.py | cd8969f3e5943780526236b2440a188963b4a77c | [] | no_license | porosya80/codesignal | 8659fba8cd9001efdca798590bacbfb4d41dc5b5 | f26d5c739b093019a149047317cc32d9aa92541b | refs/heads/master | 2020-03-22T21:54:23.342397 | 2018-12-05T05:05:51 | 2018-12-05T05:05:51 | 140,720,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | def arrayMaxConsecutiveSum(a, k):
res = [sum(a[0:k])]
for i in range(1, len(a)-k+1):
res.append(res[i-1]-a[i-1]+a[i+k-1])
return(max(res))
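# Each append above reuses the previous window sum (drop a[i-1], add a[i+k-1]),
# so the scan is O(len(a)). A brute-force cross-check (my addition) for small inputs:
def arrayMaxConsecutiveSum_bruteforce(a, k):
    return max(sum(a[i:i+k]) for i in range(len(a) - k + 1))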
inputArray = [1, 3, 2, 4]
k = 3
# For inputArray = [1, 3, 2, 4] and k = 3, the output should be
print(arrayMaxConsecutiveSum(inputArray, k))
# = 9.
# All possible sums of 3 consecutive elements are:
# 1 + 3 + 2 = 6
# 3 + 2 + 4 = 9.
# Thus, the answer is 9.
| [
"[email protected]"
] | |
2984b7bfb6e62a07535587dad44f16d99b08ffe2 | 9647524c0f4d93fb1c8a992c20fe9f9d2710cde3 | /2-content/Python/pcc-master/chapter_12/ship.py | 0df9da197640b0152c6bf885473c945151b024c2 | [
"MIT"
] | permissive | bgoonz/web-dev-notes-resource-site | 16161aa68e8eecafeaba4dc7abeb957aaee864c5 | e7dc9c30393597cb39830c49c3f51c1486b97584 | refs/heads/master | 2023-09-01T14:04:20.867818 | 2021-06-17T07:56:20 | 2021-06-17T07:56:20 | 329,194,347 | 7 | 5 | MIT | 2021-07-05T06:36:49 | 2021-01-13T04:34:20 | JavaScript | UTF-8 | Python | false | false | 1,417 | py | import pygame
class Ship():
def __init__(self, ai_settings, screen):
"""Initialize the ship, and set its starting position."""
self.screen = screen
self.ai_settings = ai_settings
# Load the ship image, and get its rect.
self.image = pygame.image.load('images/ship.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# Start each new ship at the bottom center of the screen.
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
# Store a decimal value for the ship's center.
self.center = float(self.rect.centerx)
# Movement flags.
self.moving_right = False
self.moving_left = False
def update(self):
"""Update the ship's position, based on movement flags."""
# Update the ship's center value, not the rect.
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.ai_settings.ship_speed_factor
# Update rect object from self.center.
self.rect.centerx = self.center
def blitme(self):
"""Draw the ship at its current location."""
self.screen.blit(self.image, self.rect)
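# Minimal harness (my addition): `FakeSettings` stands in for the book's
# Settings class, and the script needs images/ship.bmp on disk to run.
if __name__ == "__main__":
    class FakeSettings():
        ship_speed_factor = 1.5
    pygame.init()
    screen = pygame.display.set_mode((1200, 800))
    ship = Ship(FakeSettings(), screen)
    ship.moving_right = True
    ship.update()      # nudges the ship right by ship_speed_factor
    ship.blitme()
    pygame.display.flip()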
| [
"[email protected]"
] | |
8cb6535c3e1792c0646c81b62e0b3497165a47f3 | bb976142e482afa6843271ed5c994734eca53e74 | /FPV_ANN_pureRes_4D/utils/AdamW.py | 314264d226666363e86465213d0d5b1a050a1c7e | [
"MIT"
] | permissive | mhansinger/combustionML | 552a7540864a2d6b173204cccfdc82ef8c8b2f8c | 9e60324bbd026979d4241fbdd62faaff873ce2a9 | refs/heads/master | 2021-07-09T18:34:09.462100 | 2020-05-20T11:16:31 | 2020-05-20T11:16:31 | 102,774,653 | 0 | 2 | MIT | 2019-02-27T17:55:15 | 2017-09-07T18:57:16 | Jupyter Notebook | UTF-8 | Python | false | false | 5,130 | py | """From built-in optimizer classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
# `interfaces.legacy_get_updates_support` is provided by standalone Keras 2.x
# (`keras.legacy`); these imports assume that package rather than tf.keras.
from keras import backend as K
from keras.legacy import interfaces
from keras.optimizers import Optimizer
class AdamW(Optimizer):
"""AdamW optimizer.
Default parameters follow those provided in the original paper.
# Arguments
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
weight_decay: float >= 0. Weight decay (L2 penalty) (default: 0.025).
batch_size: integer >= 1. Batch size used during training.
samples_per_epoch: integer >= 1. Number of samples (training points) per epoch.
epochs: integer >= 1. Total number of epochs for training.
# References
- [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
- [Fixing Weight Decay Regularization in Adam](https://arxiv.org/abs/1711.05101)
"""
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
epsilon=None, decay=0., weight_decay=0.025,
batch_size=1, samples_per_epoch=1,
epochs=1, **kwargs):
super(AdamW, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
self.weight_decay = K.variable(weight_decay, name='weight_decay')
self.batch_size = K.variable(batch_size, name='batch_size')
self.samples_per_epoch = K.variable(samples_per_epoch, name='samples_per_epoch')
self.epochs = K.variable(epochs, name='epochs')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx()) + 1
'''Bias corrections according to the Adam paper
'''
lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
(1. - K.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
'''Schedule multiplier eta_t = 1 for simple AdamW
According to the AdamW paper, eta_t can be fixed, decay, or
also be used for warm restarts (AdamWR to come).
'''
eta_t = 1.
p_t = p - eta_t*(lr_t * m_t / (K.sqrt(v_t) + self.epsilon))
if self.weight_decay != 0:
'''Normalized weight decay according to the AdamW paper
'''
w_d = self.weight_decay*K.sqrt(self.batch_size/(self.samples_per_epoch*self.epochs))
p_t = p_t - eta_t*(w_d*p)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'weight_decay': float(K.get_value(self.weight_decay)),
'batch_size': int(K.get_value(self.batch_size)),
'samples_per_epoch': int(K.get_value(self.samples_per_epoch)),
'epochs': int(K.get_value(self.epochs)),
'epsilon': self.epsilon}
base_config = super(AdamW, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
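# Illustrative usage (my addition; the model and data sizes are placeholders,
# and the snippet assumes standalone Keras 2.x as imported above). Passing the
# real batch size, samples per epoch, and epoch count lets the optimizer
# normalize the weight decay as w_d = weight_decay * sqrt(b / (B * T)).
if __name__ == '__main__':
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential([Dense(10, activation='softmax', input_shape=(784,))])
    model.compile(optimizer=AdamW(lr=1e-3, weight_decay=0.025, batch_size=128,
                                  samples_per_epoch=60000, epochs=10),
                  loss='categorical_crossentropy')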
| [
"[email protected]"
] | |
e664fe95ab614d5939008f413cf277a1b48a0f36 | 3017b7399869057a8be7fb11ee9341b9c8f97ba4 | /qa/elgs/pix_area.py | 87c35a1217fde6a4aa8aaa86526db6b23f0097de | [] | no_license | michaelJwilson/SV-QA | 8f486422eb71b3fbd0d395904fd654ba432bd777 | dd6095d570442852bb28ac9da0f18be7b83cddce | refs/heads/master | 2020-07-29T16:04:55.759155 | 2019-12-20T14:37:23 | 2019-12-20T14:37:23 | 209,872,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,625 | py | import os
import sys
import glob
import fitsio
import matplotlib
import pylab as pl
import pandas as pd
import numpy as np
import astropy.io.fits as fits
import matplotlib.pyplot as plt
import numpy.lib.recfunctions as rfn
import healpy as hp
from mpl_toolkits.axes_grid1 import make_axes_locatable
from fast_scatter import fast_scatter
from matplotlib import rc
from astropy.table import Table, vstack
from desitarget.targets import encode_targetid
from desitarget.geomask import is_in_box
from desitarget.targetmask import desi_mask
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy.stats import pearsonr
rc('font', **{'family':'serif', 'serif':['Times']})
rc('text', usetex=True)
##
nside = int(sys.argv[1])  # np.int is deprecated; the builtin int is equivalent here
parea = hp.nside2pixarea(nside, degrees = True)
def read_elgs(nrandom=20000, _all=False):
cols = ['NOBS_G', 'NOBS_R', 'NOBS_Z', 'PSFDEPTH_G', 'PSFDEPTH_R', 'PSFDEPTH_Z', 'GALDEPTH_G', 'GALDEPTH_R', 'GALDEPTH_Z', 'PSFSIZE_G', 'PSFSIZE_R', 'PSFSIZE_Z', 'PSFDEPTH_W1', 'PSFDEPTH_W2', 'MASKBITS']
if _all:
nrandom = -1
randoms = fitsio.FITS('/project/projectdirs/desi/target/catalogs/dr8/0.31.0/randomsall/randoms-inside-dr8-0.31.0-all.fits')
else:
randoms = fitsio.FITS('/project/projectdirs/desi/target/catalogs/dr8/0.31.0/randoms/randoms-inside-dr8-0.31.0-2.fits')
##
randoms = randoms[1][cols + ['RA', 'DEC']][:nrandom]
return randoms, cols
if __name__ == '__main__':
nside = 512
_all = True
save = False
##
randoms, cols = read_elgs(nrandom=5000, _all=_all)
isin = np.ones(len(randoms), dtype=np.float)
for bit in [1, 5, 6, 7, 11, 12, 13]:
isin = isin * (1.0 - np.clip(np.bitwise_and(randoms['MASKBITS'], np.ones_like(randoms['MASKBITS']) * 2 ** bit), a_min=0.0, a_max=1.0))
## isin = isin * np.array([(x & 2 ** bit) == 0 for x in randoms['MASKBITS']]).astype(np.int)
isin = isin.astype(np.bool)
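    # Bit-test sketch (my addition): for bit 1, a row with MASKBITS = 0b0010
    # gives clip(2, 0, 1) = 1, so isin *= (1 - 1) = 0 and the row is dropped;
    # rows with that bit clear keep isin unchanged.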
remaining = randoms[isin]
print('Input: {}; Remaining: {} ({})'.format(len(randoms), len(remaining), np.count_nonzero(isin)))
## randoms = Table(data=randoms, names=['RA', 'DEC'] + cols)
## randoms.pprint()
'''
binary = np.load('/global/cscratch1/sd/mjwilson/BGS/SV-ASSIGN/healmaps/elg_tdensity_{}.npy'.format(nside))
rhpind = binary[:,0]
rhpra = binary[:,1]
rhpdec = binary[:,2]
rtdensity = binary[:,3]
'''
##
npix = hp.pixelfunc.nside2npix(nside)
indices = np.arange(npix)
result = np.zeros_like(indices)
denom = np.zeros_like(indices)
##
hppix = hp.ang2pix(nside, (90. - remaining['DEC']) * np.pi / 180., remaining['RA'] * np.pi / 180., nest=False)
hpind, cnts = np.unique(hppix, return_counts=True)
for i, ind in enumerate(hpind):
result[ind] += cnts[i]
hppix = hp.ang2pix(nside, (90. - randoms['DEC']) * np.pi / 180., randoms['RA'] * np.pi / 180., nest=False)
hpind, cnts = np.unique(hppix, return_counts=True)
for i, ind in enumerate(hpind):
denom[ind] += cnts[i]
mask = np.array(denom > 0.0).astype(np.int)
result = mask * result / denom
occupied = result[result > 0.0]
print('\n\nDeviation level: {:.3} per cent.'.format(100. * np.std(occupied)))
##
theta, phi = hp.pix2ang(nside, range(npix), nest=False)
hpra, hpdec = 180. / np.pi * phi, 90. -180. / np.pi * theta
if save:
np.save(os.environ['CSCRATCH'] + '/BGS/SV-ASSIGN/elgs/pix_area.npy', np.c_[np.arange(npix), hpra, hpdec, result])
print('\n\nDone.\n\n')
if not _all:
fig, axarr = plt.subplots(nrows=1, ncols=1, figsize=(10, 10))
plt.subplots_adjust(left = 0.05, right = 0.95, hspace=0.4, wspace=0.2, top = 0.955, bottom = 0.025)
hpra[hpra > 300.] -= 360.
hpra += 60.
fast_scatter(axarr, hpra, hpdec, mask, -0.1, 1.1, 50, cmap='BuPu', printit=False)
axarr.set_xlim(365., -5.)
pl.savefig('pix_area_test.png')
| [
"[email protected]"
] | |
9558a3c81f1188302d59f528f70fd25f55607ec0 | 7d5f4e79ceb58bb8efadef11cd34435ac9ae7ea2 | /Labs/L01/3.py | a7babd169979229958ad8bae4697b962192be005 | [] | no_license | rjherrera/IIC1103 | 7cc58b06103ed12a1534f86b89bec0615ae707cf | e48706b14f16d437bc667bd65c17af9311aca851 | refs/heads/master | 2021-01-21T14:34:11.064502 | 2014-11-20T23:59:25 | 2014-11-20T23:59:25 | 95,306,076 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | import sys
x=float(sys.stdin.readline().strip())
c=(x**3)+((5/2)*(x**2))+(6*x)-6
print(c)
| [
"[email protected]"
] | |
2f566142ff25a00fdbacb0a7a723b1cd6b2bad90 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/90/usersdata/197/60329/submittedfiles/matriz2.py | 85af8758f0a532d442df4077a2a04b076b7f95fe | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | # -*- coding: utf-8 -*-
import numpy AS np
n=int(input('Digite o numero de linhas e colunas:'))
a=np.zeros((n,n))
for i in range (0, a.shape[0]
def somas iguais(a):
| [
"[email protected]"
] | |
5dc345bc932009a8b62c1f5f0ee1a5f1c621fe79 | faf4c1055f50ca4dd81d0fc2f16f5e95905e3827 | /protocol/split_mutations.py | ec227caabe40b8148918a621e55bf3e32706bad5 | [] | no_license | Ningshiqi/protocol | 07e0e95c57d74e0ea2bbbfd7d49f9dc42c5dc37e | 8a906a79a2835a61d252cafc2452b407230c4409 | refs/heads/master | 2021-01-20T07:27:38.880511 | 2016-06-22T18:39:15 | 2016-06-22T18:39:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | py | from random_split import RandomSplit
import pandas as pd
import sys
import argparse
import os
def mymkdir(dir_path):
try:
os.mkdir(dir_path)
except OSError:
pass
def parse_arguments():
d = ('Generate splits of the data while respecting the proportion '
'of each tumor type in the split.')
parser = argparse.ArgumentParser(description=d)
help_str = 'Mutations in correct format'
parser.add_argument('-i', '--input',
type=str, required=True,
help=help_str)
help_str = 'Number of times to perform random split of data'
parser.add_argument('-n', '--number',
type=int, default=1,
help=help_str)
help_str = ('Column name containing the sample ID. Default: automatically'
' checks whether "Tumor_Sample_Barcode" or "Tumor_Sample" is a column.')
parser.add_argument('-s', '--sample-col',
type=str, default=None,
help=help_str)
help_str = 'Output directory for mutations split into two files'
parser.add_argument('-o', '--output',
type=str, required=True,
help=help_str)
args = parser.parse_args()
return vars(args)
def main(opts):
# try to make directory
mymkdir(opts['output'])
# read in data
    df = pd.read_csv(opts['input'], sep='\t')  # argparse stores --input under the 'input' key
# figure out the sample column
    if opts['sample_col'] is not None:
        samp_col = opts['sample_col']
    elif 'Tumor_Sample_Barcode' in df.columns:
        samp_col = 'Tumor_Sample_Barcode'
    elif 'Tumor_Sample' in df.columns:
        samp_col = 'Tumor_Sample'
    else:
        # no logger is configured in this script, so report directly to stderr
        sys.stderr.write('Please specify the column name for the sample ID (--sample-col)\n')
        sys.exit(1)
# setup random splitting object
SAMPLE_RATE = .5 # half splits
dfg = RandomSplit(df.copy(),
col_name=samp_col,
sub_sample=SAMPLE_RATE,
num_iter=opts['number'])
# make random splits
for i, (left_df, right_df) in enumerate(dfg.dataframe_generator()):
output_dir = os.path.join(opts['output'], 'Iteration_{0}'.format(i))
mymkdir(output_dir)
# make sure data is sorted by genes, so no problem with entropy script
# assuming sorted order
left_df.sort(columns=['Gene'], inplace=True)
right_df.sort(columns=['Gene'], inplace=True)
lout_path = os.path.join(output_dir, 'first.txt')
left_df.to_csv(lout_path,
sep='\t', index=False)
rout_path = os.path.join(output_dir, 'second.txt')
right_df.to_csv(rout_path,
sep='\t', index=False)
if __name__ == '__main__':
opts = parse_arguments()
main(opts)
| [
"[email protected]"
] | |
2421c173dbdd049106e02be81e7dfa5430fd987e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02629/s238693041.py | ab02df2f11872200d5371b13356ba05aa505c465 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | import sys , math
from bisect import bisect
N=int(input())
alp="abcdefghijklmnopqrstuvwxyz"
R=[]
i=1
tot = 0
while tot < 1000000000000001:
tar = 26**i
tot+=tar
R.append(tot+1)
i+=1
keta = bisect(R, N) + 1  # number of letters in the N-th name (bijective base 26)
if keta == 1:
print(alp[N-1])
sys.exit()
ans = ""
M = N - R[keta - 1]
for i in range(keta):
j = keta - i - 1
ind = M // (26**j)
M -= ind * (26**j)
ans+=alp[ind]
print(ans) | [
"[email protected]"
] | |
da07d731ba5449ac6f1e1a753266a1d267e42d87 | 99af867b8112a2a7ca55b204d795141ccf6a3906 | /ironbox/evaluation_models/classify_fcnet.py | 7a5e081fc1718e43c924985a3198e2a21d39fccc | [] | no_license | evanthebouncy/class_and_style | 5f165f75e3850919e90a05a72533209429efed04 | d434e2414526bece7d6757f4c9c1ccb94263769f | refs/heads/master | 2020-03-29T18:09:36.417411 | 2019-02-04T22:10:33 | 2019-02-04T22:10:33 | 150,197,318 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,454 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# ===================== FC NN CLASSIFIER =====================
def to_torch(x, dtype, req = False):
tor_type = torch.cuda.LongTensor if dtype == "int" else torch.cuda.FloatTensor
x = Variable(torch.from_numpy(x).type(tor_type), requires_grad = req)
return x
# A simple fully connected neural network representing a function that maps
# an input_dim-dimensional input to k classes
class FCNet(nn.Module):
# require input dimension, k_classes outputs
# input-layer, input_dim // 2 hidden layer, k_class output layer
def __init__(self, input_dim, k_classes):
super(FCNet, self).__init__()
self.name = "FCNet"
self.fc = nn.Linear(input_dim, input_dim // 2)
self.pred = nn.Linear(input_dim // 2, k_classes)
self.opt = torch.optim.Adam(self.parameters(), lr=0.001)
def predict(self, x):
x = F.relu(self.fc(x))
x = F.log_softmax(self.pred(x), dim=1)
return x
# train until saturation
# assume train_corpus is a data_holder that supports a 'get_sample(n_batch)'
# function which returns some samples
def learn(self, train_corpus):
losses = []
while True:
# terminate if no improvement
if len(losses) > 2100:
if losses[-1] < 1e-3:
break
near_data_loss = np.mean(losses[-1000:])
far_data_loss = np.mean(losses[-2000:-1000])
# if average loss of last 1k iteration is greater than 99% of the
# last last 1k iterations, stop too
if near_data_loss > 0.99 * far_data_loss:
break
# randomly sample a batch of data
X_batch, Y_batch = train_corpus.get_sample(40)
# convert to proper torch forms
X_batch = to_torch(X_batch, "float")
Y_batch = to_torch(Y_batch, "int")
# optimize
self.opt.zero_grad()
output = self.predict(X_batch)
loss = F.nll_loss(output, Y_batch)
losses.append( loss.data.cpu().numpy() )
loss.backward()
self.opt.step()
# evaluate the model on the test_corpus, here test_corpus is assumed to be simply
# a pair of X, Y
def evaluate(self, test_corpus):
X, Y = test_corpus
X = to_torch(X, "float")
label_pred = np.argmax(self.predict(X).data.cpu().numpy(), axis=1)
return np.sum(label_pred != Y) / len(Y)
if __name__ == '__main__':
fcnet = FCNet(100, 4)
print ("hi")
| [
"[email protected]"
] | |
b761885bc3e88a2116f44fd2680442026270b9cd | 30b4d3122db7146d07a6eb431f6c1030f716aaa8 | /memegen/memegen/routes/examples.py | a25caff591e76aece6f65e128a388b085439301a | [
"MIT"
] | permissive | flavienbwk/memegen | b00df3d2a2fb68f7b2de88e0ed158280f99f7fa7 | dcb5635ad556d1c855fc3851609b32b1be133441 | refs/heads/master | 2020-11-25T03:38:56.626649 | 2019-12-16T23:59:24 | 2019-12-16T23:59:24 | 228,484,764 | 0 | 0 | NOASSERTION | 2019-12-16T22:09:08 | 2019-12-16T22:09:07 | null | UTF-8 | Python | false | false | 482 | py | from flask import Blueprint, render_template, current_app, make_response
from ._utils import samples
blueprint = Blueprint('examples-page', __name__)
@blueprint.route("/examples")
def get():
sample_images = list(samples())
html = render_template(
"examples.html",
sample_images=sample_images,
config=current_app.config,
)
response = make_response(html)
response.headers['Cache-Control'] = f'max-age={60*60*24*7}'
return response
| [
"[email protected]"
] | |
d003a7c6ff75f74dbe0eb5bd94675975cd7a687c | b7a31624f827b16f2b6e1cc6b61bf8529191ff0f | /matplotlib/多序列堆积条状图.py | 49140fe1a9bbbb3e291ee0348f5d86c95563913a | [] | no_license | thj120000/python | eb7daddefef6b9a1039e8ef58c3af88e05ffb794 | a52aad1e57b100064db3e1e3a2af3c02eded4bf7 | refs/heads/master | 2020-04-24T19:29:48.741308 | 2019-03-03T11:11:12 | 2019-03-03T11:11:12 | 172,213,828 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | #!@Author : Sanwat
#!@File : .py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
'''
The bar() chart type can also render pandas DataFrames and Series as charts.
'''
plt.figure(1)
series1= np.array([3,4,5,3])
series2= np.array([1,2,2,5])
series3= np.array([2,3,3,4])
index= np.arange(4)
plt.axis([0,4,0,15])
plt.bar(index,series1,color='r')
plt.bar(index,series2,color='g',bottom=series1)
plt.bar(index,series3,color='b',bottom=series1+series2)
plt.xticks(index-0.4,['Jan15','Feb15','Mar15','Apr15'])
plt.show()
plt.figure(2)
plt.axis([0,15,0,4])
plt.barh(index,series1,color='g')
plt.barh(index,series2,color='r',left=series1)
plt.barh(index,series3,color='b',left=series1+series2)
plt.yticks(index-0.4,['Jan','Feb','Mar','Apr'])
plt.show()
'''
Below we distinguish the multiple series by hatch pattern instead of color.
'''
plt.figure(3)
plt.axis([0,15,0,4])
plt.title('A Multiseries Horizontal Bar Chart',fontsize=20,color='r')
plt.barh(index,series1,color='w',hatch='xx')  # the hatch keyword sets the fill pattern
plt.barh(index,series2,color='w',hatch= '///',left=series1)
plt.barh(index,series3,color='w',hatch='\\\\\\',left=series1+series2)
plt.yticks(index-0.4,['Jan','Feb','Mar','Apr'])
plt.show()
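# The note at the top mentions pandas; the same stacked chart built from a
# DataFrame (my addition) is a one-liner:
df = pd.DataFrame({'series1': series1, 'series2': series2, 'series3': series3})
df.plot(kind='bar', stacked=True)
plt.show()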
| [
"[email protected]"
] | |
c2a45a4d33438fa9313023600490c3ba37f977e5 | 751b094918ae9200afe7824d58804549082caa95 | /src/python/WMComponent/DBS3Buffer/Oracle/DBSBufferFiles/SetLocationByLFN.py | b5ca849beb2bedc814bf1f1956da2893d1dd3764 | [] | no_license | cinquo/WMCore | 7ebd13269f42eb97f416f8f2bdaca05fa93c6afc | 122f9332f2e944154dd0df68b6b3f2875427b032 | refs/heads/master | 2021-01-09T06:28:58.947626 | 2013-06-05T08:31:53 | 2013-06-05T08:31:53 | 2,965,330 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | #!/usr/bin/env python
"""
_SetLocationByLFN_
Oracle implementation of DBSBuffer.SetLocationByLFN
"""
from WMComponent.DBS3Buffer.MySQL.DBSBufferFiles.SetLocationByLFN import SetLocationByLFN as MySQLSetLocationByLFN
class SetLocationByLFN(MySQLSetLocationByLFN):
"""
Set the location of files using lfn as the key
"""
sql = """INSERT INTO dbsbuffer_file_location (filename, location)
SELECT df.id, dl.id
FROM dbsbuffer_file df, dbsbuffer_location dl
WHERE df.lfn = :lfn
AND dl.se_name = :sename
"""
| [
"sfoulkes@4525493e-7705-40b1-a816-d608a930855b"
] | sfoulkes@4525493e-7705-40b1-a816-d608a930855b |
15c3b44a518019dd9db25700f49622a3c9501ea3 | 488ae9723f148082e949448eed2fdcb4c111e784 | /bill-calculator.py | 61c9064f33cd5cda10414550cac73be05a057b70 | [] | no_license | priyanka-advani/hba_functions_exercise | 91b55ab221557d6358a6364bc8d26bb9644b12fc | d4a1d6cbfc90cc3fbee8157ba2e8707d36ef8484 | refs/heads/master | 2021-01-20T02:39:11.656709 | 2017-04-26T04:20:16 | 2017-04-26T04:20:16 | 89,437,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | """
This is Part 4 of the Hackbright Prep functions exercise
"""
def calculate_tip(bill_amt, tip_percentage):
"""Given the bill amount and tip percentage, calculates the tip."""
# ENTER YOUR CODE HERE
tip_amt = bill_amt * tip_percentage
return tip_amt
def calculate_total(bill_amt, tip_amt):
"""Given the tip amount and the bill amount, calculates the total bill."""
# ENTER YOUR CODE HERE
total = bill_amt + tip_amt
return total
def split_bill(total, number_of_people):
"""Given the bill total and the number of people, calculates the total per person."""
# ENTER YOUR CODE HERE
total_per_person = total / number_of_people
return total_per_person
def total_per_person():
"""Gets user input for bill amount, tip %, and # of people. Returns total per person.
This function should:
1. Get user input for the bill amount, tip percentage, and # of people
2. Calculate the tip amount and save it to a variable.
3. Using the tip amount calculated above, find the total bill amount.
4. Using the total found above, calculate the total per person.
"""
# ENTER YOUR CODE HERE
bill_amt = int(raw_input("Enter bill amount: "))
tip_percentage = float(raw_input("Enter tip percentage: "))
number_of_people = int(raw_input("Enter number of people: "))
tip_amt = calculate_tip(bill_amt, tip_percentage)
total = calculate_total(bill_amt, tip_amt)
per_person = split_bill(total, number_of_people)
return per_person
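# Worked example (my addition): bill 100, tip 0.20, 4 people
#   tip = 100 * 0.20 = 20.0; total = 120.0; per person = 120.0 / 4 = 30.0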
##############################################################################
# Don't touch the code below, this will allow us to run the total_per_person function when we
# run our python file in the terminal using `python bill-calculator.py`
if __name__ == "__main__":
print total_per_person() | [
"[email protected]"
] | |
493c4d78f5463a27107352a23c732fa1f16841e4 | def4838e05acb0932f3b51ce689b5b264bf4ebfd | /apps/users/forms.py | bfaf6e3a5f29e314158102103fa93e41451581f5 | [] | no_license | shishengjia/Django_By_Example_Bookmarks | 6ce125b2f15348b6b2fec26c4ff8694f000194e6 | 377d7ce31ea66e5e55032fcf47d84a1afd9acdbe | refs/heads/master | 2021-01-19T14:16:52.566325 | 2017-04-27T12:23:41 | 2017-04-27T12:23:41 | 88,141,126 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | from django import forms
from .models import UserProfile
class LoginForm(forms.Form):
    username = forms.CharField(error_messages={'required': 'Please enter your name'})
    password = forms.CharField(min_length=6, error_messages={'required': 'Please enter your password',
                                                             'min_length': 'The password must be at least 6 characters'})
class RegisterForm(forms.ModelForm):
username = forms.CharField()
password = forms.CharField(label='Password', widget=forms.PasswordInput)
class Meta:
model = UserProfile
fields = ('username', )
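# Quick validation sketch (my addition; run inside a configured Django project):
# form = LoginForm(data={'username': 'alice', 'password': 'secret123'})
# assert form.is_valid()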
| [
"[email protected]"
] | |
d50e226c0416782f1bbf88fe9f0570b0fca3618a | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/recognize_thailand_idcard_request.py | b0ea673ab51acb3c850cea2011a6aae59dd80fec | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,613 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RecognizeThailandIdcardRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'enterprise_project_id': 'str',
'body': 'ThailandIdcardRequestBody'
}
attribute_map = {
'enterprise_project_id': 'Enterprise-Project-Id',
'body': 'body'
}
def __init__(self, enterprise_project_id=None, body=None):
"""RecognizeThailandIdcardRequest
The model defined in huaweicloud sdk
        :param enterprise_project_id: Enterprise project ID. OCR supports Enterprise Project Management (EPS), which bills resource usage of different user groups and users separately. How to obtain: open the [Enterprise Project Management](https://console-intl.huaweicloud.com/eps/?region=ap-southeast-2#/projects/list) page, click the enterprise project name, and read the Enterprise-Project-Id on the project details page. See the user guide for the project creation steps. > Note: once an enterprise project exists, three cases apply when passing this parameter. - A correct ID: the OCR service works normally and the bill is attributed to that enterprise project. - A wrong ID: the OCR service still works, but the bill's enterprise project is classified as "unallocated". - No ID: the OCR service works and the bill is likewise classified as "unallocated".
:type enterprise_project_id: str
:param body: Body of the RecognizeThailandIdcardRequest
:type body: :class:`huaweicloudsdkocr.v1.ThailandIdcardRequestBody`
"""
self._enterprise_project_id = None
self._body = None
self.discriminator = None
if enterprise_project_id is not None:
self.enterprise_project_id = enterprise_project_id
if body is not None:
self.body = body
@property
def enterprise_project_id(self):
"""Gets the enterprise_project_id of this RecognizeThailandIdcardRequest.
        Enterprise project ID. OCR supports Enterprise Project Management (EPS), which bills resource usage of different user groups and users separately. How to obtain: open the [Enterprise Project Management](https://console-intl.huaweicloud.com/eps/?region=ap-southeast-2#/projects/list) page, click the enterprise project name, and read the Enterprise-Project-Id on the project details page. See the user guide for the project creation steps. > Note: once an enterprise project exists, three cases apply when passing this parameter. - A correct ID: the OCR service works normally and the bill is attributed to that enterprise project. - A wrong ID: the OCR service still works, but the bill's enterprise project is classified as "unallocated". - No ID: the OCR service works and the bill is likewise classified as "unallocated".
:return: The enterprise_project_id of this RecognizeThailandIdcardRequest.
:rtype: str
"""
return self._enterprise_project_id
@enterprise_project_id.setter
def enterprise_project_id(self, enterprise_project_id):
"""Sets the enterprise_project_id of this RecognizeThailandIdcardRequest.
        Enterprise project ID. OCR supports Enterprise Project Management (EPS), which bills resource usage of different user groups and users separately. How to obtain: open the [Enterprise Project Management](https://console-intl.huaweicloud.com/eps/?region=ap-southeast-2#/projects/list) page, click the enterprise project name, and read the Enterprise-Project-Id on the project details page. See the user guide for the project creation steps. > Note: once an enterprise project exists, three cases apply when passing this parameter. - A correct ID: the OCR service works normally and the bill is attributed to that enterprise project. - A wrong ID: the OCR service still works, but the bill's enterprise project is classified as "unallocated". - No ID: the OCR service works and the bill is likewise classified as "unallocated".
:param enterprise_project_id: The enterprise_project_id of this RecognizeThailandIdcardRequest.
:type enterprise_project_id: str
"""
self._enterprise_project_id = enterprise_project_id
@property
def body(self):
"""Gets the body of this RecognizeThailandIdcardRequest.
:return: The body of this RecognizeThailandIdcardRequest.
:rtype: :class:`huaweicloudsdkocr.v1.ThailandIdcardRequestBody`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this RecognizeThailandIdcardRequest.
:param body: The body of this RecognizeThailandIdcardRequest.
:type body: :class:`huaweicloudsdkocr.v1.ThailandIdcardRequestBody`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecognizeThailandIdcardRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
ad74b9faa2c23ac8cee70d2afbcfb959ddcb1216 | eca2012b2b3d970219d59b369cbeec54b1fa6cd9 | /App_Customer/views.py | 780ba1df5fe3d5a04b1b2121a07a5df1350445ab | [] | no_license | tasim313/softwareengineeringdesigncapstoneproject | 0874ca003f64d6d079a201193d1246a8c485362c | 6f4910a024c4233d360321347ab9e0679502b23b | refs/heads/master | 2023-06-08T15:55:36.905104 | 2020-12-15T17:20:10 | 2020-12-15T17:20:10 | 320,646,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from django.shortcuts import render
def main(request):
return render(request, 'App_Customer/customer_login.html') | [
"[email protected]"
] | |
c4cf4c885cd3eb01d8f58c317d42c028c234ea00 | 2103809bc3df62449488328946d0914241ced863 | /Project Specific Demo/TrackingInput.py | d24851795ed6c84c16062e6b02261e832b991cd9 | [] | no_license | PMiskew/Year9DesignCS4-PythonPM | b5fb4382091cd8be63a43371ac614be74fb4c268 | eeeace0b0ff89e1e4a65361f4e40167e7f4c3cc4 | refs/heads/master | 2021-06-23T17:09:22.626668 | 2019-09-21T23:30:25 | 2019-09-21T23:30:25 | 148,173,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | import tkinter as tk
def submit():
print("Submit pressed")
list.append(ent.get())
print(list)
lab.config(text = "Changed")
#This function will parse a string and
#add a new element to the list for all
#values
#creates an empty list
list = []
root = tk.Tk()
lab = tk.Label(root, text = "Input Food")
lab.pack()
ent = tk.Entry(root)
ent.pack()
btn = tk.Button(root, text = "Submit", command = submit)
btn.pack()
root.mainloop() | [
"[email protected]"
] | |
61b9db032c2ab839765d189ba8fcfa04f26494d8 | 6a6984544a4782e131510a81ed32cc0c545ab89c | /src/production-histograms/.svn/pristine/61/61b9db032c2ab839765d189ba8fcfa04f26494d8.svn-base | 65cb661716719f6def08c48e49a24dc9e8215348 | [] | no_license | wardVD/IceSimV05 | f342c035c900c0555fb301a501059c37057b5269 | 6ade23a2fd990694df4e81bed91f8d1fa1287d1f | refs/heads/master | 2020-11-27T21:41:05.707538 | 2016-09-02T09:45:50 | 2016-09-02T09:45:50 | 67,210,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | from math import cos, log10, isnan
from I3Tray import I3Units
from icecube import icetray, dataclasses
from icecube.production_histograms.histograms.histogram import Histogram
from icecube.production_histograms.histogram_modules.histogram_module import HistogramModule
class CORSIKAWeightModule(HistogramModule):
def __init__(self):
HistogramModule.__init__(self)
self.frame_key = "CorsikaWeightMap"
self.append(Histogram(, , , "FluxSum"))
self.append(Histogram(, , , "Weight"))
def DAQ(self, frame):
        if self.frame_key not in frame:
return
weight_dict = frame[self.frame_key]
if "FluxSum" in weight_dict:
self.histograms["FluxSum"].fill(weight_dict["FluxSum"])
if "Weight" in weight_dict:
self.histograms["Weight"].fill(weight_dict["Weight"])
| [
"[email protected]"
] | ||
2efbf244a54953af90a9268a39fcbe2a09738c25 | ac15eda44e8dcfee6dff62f514c5b98a3382f50d | /python/pygame/ftetris/lib/main.py | a645b4bdff5dcc5af6ca9c0ec63994b06f58037b | [] | no_license | yangruihan/raspberrypi | 5789c1a2e72d4012d46563d0644b08d032d346e6 | 22bc1a06b25e129a4314f4bc9cec5112affda136 | refs/heads/master | 2022-12-27T09:27:05.102020 | 2020-10-13T09:41:24 | 2020-10-13T09:41:24 | 32,977,936 | 4 | 0 | null | 2022-12-16T01:47:53 | 2015-03-27T09:30:43 | Java | UTF-8 | Python | false | false | 374 | py | import tetris
class Main:
def __init__(self, screen):
self.screen = screen
def run(self, elapse):
return self.tetris.update(elapse)
def start(self, kind):
if kind == 6:
self.tetris = tetris.Tetris(self.screen)
else:
self.tetris = eval(
"tetris.Tetris" + str(kind) + "(self.screen)")
| [
"[email protected]"
] | |
5a8960b4e94e566305e39cc4e66dbd7bf77d4ab6 | 1dadb20cff6127dd950521aa5a747f6309eac7cd | /users/models.py | c3e6b289a6deca7d3547d180502d55e9938f8027 | [] | no_license | profmcdan/csu-core | a40ca42104728146e5951bdb2856c75d0090a96a | 24091936f167fb03988da9226b76a90d1a6259d9 | refs/heads/master | 2022-12-12T12:15:06.106113 | 2019-11-17T13:04:09 | 2019-11-17T13:04:09 | 220,621,544 | 0 | 0 | null | 2022-12-08T06:51:12 | 2019-11-09T09:44:07 | Python | UTF-8 | Python | false | false | 1,955 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from .managers import UserManager
class User(AbstractBaseUser, PermissionsMixin):
created_on = models.DateTimeField(
auto_now_add=True,
null=True,
)
modified_on = models.DateTimeField(
auto_now=True,
null=True,
)
deactivated = models.DateTimeField(
blank=True,
null=True,
verbose_name='Deactivated',
help_text='Designates whether this user should be treated as deactivated',
)
first_name = models.CharField(
max_length=30,
blank=True,
null=True,
verbose_name='First Name',
)
last_name = models.CharField(
max_length=30,
blank=True,
null=True,
verbose_name='Last Name',
)
middle_name = models.CharField(
max_length=30,
blank=True,
null=True,
verbose_name='Middle Name',
)
email = models.EmailField(
unique=True,
max_length=50,
verbose_name='Email',
)
phone = models.CharField(
max_length=30,
blank=True,
null=True,
verbose_name='Phone Number',
)
is_staff = models.BooleanField(
default=False,
verbose_name='Is Staff',
help_text='Designates whether the user can log into Django admin site',
)
password_reset_token = models.CharField(
max_length=50,
blank=True,
null=True,
verbose_name='Password Reset Token',
)
invitation_token = models.CharField(
max_length=50,
blank=True,
null=True,
verbose_name='Invitation Token'
)
USERNAME_FIELD = 'email'
objects = UserManager()
class Meta:
get_latest_by = 'date_joined'
def __str__(self):
return self.email
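# Usage sketch (my addition; assumes the imported UserManager implements create_user):
# user = User.objects.create_user(email='[email protected]', password='s3cret')
# str(user)  # -> '[email protected]'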
| [
"[email protected]"
] | |
40a7027d6d97d67adb0811ae98b4be1e70558c8a | 05a70c12df808455100598d8a6fdb5635c641ab8 | /Ago-Dic-2019/Ejemplos/Design Patterns/Strategy/ShellSortStrategy.py | a79568c20a8f1ce41d829d0bea6a44c82658e388 | [
"MIT"
] | permissive | Jonathan-aguilar/DAS_Sistemas | 991edcc929c33ba9bb8bc84e741b55c10a8420a3 | 4d02efc64161871084df1bff258112351e5d1241 | refs/heads/development | 2023-07-24T12:26:54.698452 | 2021-09-02T20:52:26 | 2021-09-02T20:52:26 | 289,764,892 | 1 | 0 | MIT | 2021-09-02T20:52:27 | 2020-08-23T20:54:55 | Python | UTF-8 | Python | false | false | 192 | py | from SortStrategy import SortStrategy
class ShellSortStrategy(SortStrategy):
"""docstring for ShellSortStrategy"""
def sort(self, my_list = []):
return "Lista ordenada con Shell Sort!"
| [
"[email protected]"
] | |
9370579be77282af8b2b53e2f4fac9a305ab7f6d | 825c73b9a0db8f65d948a127bd8ed772192f42a3 | /tsai/models/RNN.py | 138e27f0cfda1214a4fe9ca97f9c829c59e52f4c | [
"Apache-2.0"
] | permissive | avimec13/tsai | d9dd5b7529554f238984ac82ab74cc41cc419b83 | 8ffa0afbcac6f886c3cb8310fa60d636becb1799 | refs/heads/main | 2023-06-15T16:34:44.215741 | 2021-07-17T19:34:32 | 2021-07-17T19:34:32 | 388,736,804 | 1 | 0 | Apache-2.0 | 2021-07-23T08:47:26 | 2021-07-23T08:47:26 | null | UTF-8 | Python | false | false | 1,203 | py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/105_models.RNN.ipynb (unless otherwise specified).
__all__ = ['RNN', 'LSTM', 'GRU']
# Cell
from ..imports import *
from .layers import *
# Cell
class _RNN_Base(Module):
def __init__(self, c_in, c_out, hidden_size=100, n_layers=1, bias=True, rnn_dropout=0, bidirectional=False, fc_dropout=0.):
self.rnn = self._cell(c_in, hidden_size, num_layers=n_layers, bias=bias, batch_first=True, dropout=rnn_dropout, bidirectional=bidirectional)
self.dropout = nn.Dropout(fc_dropout) if fc_dropout else noop
self.fc = nn.Linear(hidden_size * (1 + bidirectional), c_out)
def forward(self, x):
x = x.transpose(2,1) # [batch_size x n_vars x seq_len] --> [batch_size x seq_len x n_vars]
output, _ = self.rnn(x) # output from all sequence steps: [batch_size x seq_len x hidden_size * (1 + bidirectional)]
output = output[:, -1] # output from last sequence step : [batch_size x hidden_size * (1 + bidirectional)]
output = self.fc(self.dropout(output))
return output
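# Shape sketch (my addition): a batch of 8 series with 3 variables and 50 time
# steps maps to 8 rows of class scores.
# xb = torch.rand(8, 3, 50)
# LSTM(c_in=3, c_out=2)(xb).shape  # -> torch.Size([8, 2])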
class RNN(_RNN_Base):
_cell = nn.RNN
class LSTM(_RNN_Base):
_cell = nn.LSTM
class GRU(_RNN_Base):
_cell = nn.GRU | [
"“[email protected]”"
] | |
8eecc943b82223f0ac6ee54536b2991412a8e8e1 | af2e728ecbec0c55b183dbbfc9ee58f0da7ae1a5 | /django/contrib/contenttypes/migrations/0002_remove_content_type_name.py | 1b91437dbc5b33a0bc3cc8ac834231ca7fcea139 | [
"BSD-3-Clause"
] | permissive | mdiener21/django | 797f366d506a590e4d9dd52d20bfa12436b0c183 | c62791bfe4e9e18304bb125e5619c34d8afeb19f | refs/heads/master | 2021-01-22T13:02:52.947589 | 2015-01-23T10:13:27 | 2015-01-23T10:13:27 | 29,727,259 | 1 | 0 | null | 2015-01-23T10:06:18 | 2015-01-23T10:06:18 | null | UTF-8 | Python | false | false | 1,107 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_legacy_name(apps, schema_editor):
ContentType = apps.get_model('contenttypes', 'ContentType')
for ct in ContentType.objects.all():
try:
ct.name = apps.get_model(ct.app_label, ct.model)._meta.object_name
except:
ct.name = ct.model
ct.save()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='contenttype',
options={'verbose_name': 'content type', 'verbose_name_plural': 'content types'},
),
migrations.AlterField(
model_name='contenttype',
name='name',
field=models.CharField(max_length=100, null=True),
),
migrations.RunPython(
migrations.RunPython.noop,
add_legacy_name,
),
migrations.RemoveField(
model_name='contenttype',
name='name',
),
]
| [
"[email protected]"
] | |
cce788f7d4808adac6a84342dfe4fc04fb0035c7 | 5e27c7f5426c169fd348b26e94b65c35f9cdc459 | /tutorial/canvas/start3/workers/coloredtextbox_panda.py | c908c7a9b44518334e0eeb4234d8fe945f96a599 | [
"BSD-2-Clause"
] | permissive | agoose77/hivesystem | e2c9c27408233b5794151ca74f541d2e6063d58a | e1f55c5ea530a989477edb896dcd89f3926a31b8 | refs/heads/master | 2020-07-21T23:07:37.178856 | 2014-08-23T02:13:19 | 2014-08-23T02:13:19 | 20,776,359 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,519 | py | # try to import Panda3D, but delay exceptions until the class is actually used
try:
from panda3d.core import NodePath, TextNode
import panda3d
except ImportError:
panda3d = None
#coloredtextbox class: this will be converted to a canvas drone using "build_canvasdrone"
class coloredtextbox(object):
#obligatory argument list for __init__: canvasdrone, object, identifier, parameters
def __init__(self, canvasdrone, ctb, identifier, parameters):
if panda3d is None: raise ImportError("Cannot locate Panda3D")
from dragonfly.canvas import box2d
if identifier is None: identifier = ""
self.node = None
box = box2d(ctb.posx, ctb.posy, ctb.sizex, ctb.sizey, ctb.sizemode)
self.pnode = canvasdrone._get_parent_nodepath(identifier, box)
self._show(ctb, identifier)
#obligatory method "update". Argument list: object, identifier, parameters
def update(self, ctb, identifier, parameters):
self._show(ctb, identifier)
#obligatory method "remove"
def remove(self):
if self.pnode is not None:
self.pnode.removeNode()
self.pnode = None
def _show(self, ctb, identifier):
if self.node is not None: self.node.removeNode()
tnode = TextNode(identifier)
tnode.setText(ctb.text)
r, g, b, a = ctb.textcolor
tnode.setTextColor(r, g, b, a)
r, g, b, a = ctb.boxcolor
tnode.setCardColor(r, g, b, a)
tnode.setCardAsMargin(0, 0, 0, 0)
tnode.setCardDecal(True)
node = NodePath(tnode)
self._scale(tnode, node)
node.reparentTo(self.pnode)
self.node = node
def _scale(self, tnode, node):
top, bottom = tnode.getTop(), tnode.getBottom()
l, r = tnode.getLeft(), tnode.getRight()
w, h = r - l, top - bottom
scalex = 0
if w > 0: scalex = 1.0 / w
scaley = 0
if h > 0: scaley = 1.0 / h
node.setScale(scalex, 1, -scaley)
dimx = w * scalex
midx = (l * scalex + r * scalex) / 2.0
dimy = h * scaley
midy = (top * scaley + bottom * scaley) / 2.0
node.setPos(-midx + 0.5, 0, midy - 0.5)
import bee
from dragonfly.canvas import canvasdrone
from dragonfly.pandahive import build_canvasdrone
coloredtextbox_panda = build_canvasdrone(
wrappedclass=coloredtextbox,
classname="coloredtextbox_panda",
drawshow="show",
drawshowtype=("object", "coloredtextbox"),
baseclass=canvasdrone
)
| [
"[email protected]"
] | |
056827d6726415be64604877d04fdf1f8782a92d | 7704dfa69e81c8a2f22b4bdd2b41a1bdad86ac4a | /nailgun/nailgun/test/unit/test_node_nic_handler.py | f4ff78924c63dfae2129231aece7f06a7027e081 | [
"Apache-2.0"
] | permissive | andrei4ka/fuel-web-redhat | 8614af4567d2617a8420869c068d6b1f33ddf30c | 01609fcbbae5cefcd015b6d7a0dbb181e9011c14 | refs/heads/master | 2022-10-16T01:53:59.889901 | 2015-01-23T11:00:22 | 2015-01-23T11:00:22 | 29,728,913 | 0 | 0 | Apache-2.0 | 2022-09-16T17:48:26 | 2015-01-23T10:56:45 | Python | UTF-8 | Python | false | false | 17,786 | py | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.openstack.common import jsonutils
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
class TestHandlers(BaseIntegrationTest):
def test_get_handler_with_wrong_nodeid(self):
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': 1}),
expect_errors=True,
headers=self.default_headers)
self.assertEqual(resp.status_code, 404)
def test_get_handler_with_invalid_data(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
meta_list = [
{'interfaces': None},
{'interfaces': {}}
]
for nic_meta in meta_list:
meta = self.env.default_metadata()
meta.update(nic_meta)
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
expect_errors=True,
headers=self.default_headers
)
self.assertEqual(resp.status_code, 400)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json_body, [])
def test_get_handler_with_incompleted_iface_data(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
meta_clean_list = [
{'interfaces': [{'name': '', 'mac': '00:00:00:00:00:00'}]},
{'interfaces': [{'mac': '00:00:00:00:00:00'}]},
{'interfaces': [{'name': 'eth0'}]}
]
for nic_meta in meta_clean_list:
meta = self.env.default_metadata()
meta.update(nic_meta)
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
expect_errors=True,
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers
)
self.assertEqual(resp.json_body, [])
def test_get_handler_with_invalid_speed_data(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
meta_clean_list = [
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'max_speed': -100}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'current_speed': -100}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'current_speed': '100'}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'max_speed': 10.0}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'max_speed': '100'}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'current_speed': 10.0}]}
]
for nic_meta in meta_clean_list:
meta = self.env.default_metadata()
meta.update(nic_meta)
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
expect_errors=True,
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeHandler', kwargs={'obj_id': node['id']}),
headers=self.default_headers
)
ifaces = resp.json_body['meta']['interfaces']
self.assertEqual(
ifaces,
[
{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'max_speed': None, 'current_speed': None}
]
)
def test_get_handler_without_NICs(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json_body, [])
def test_get_handler_with_NICs(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': self.env.generate_random_mac(),
'current_speed': 1, 'max_speed': 1},
{'name': 'eth1', 'mac': self.env.generate_random_mac(),
'current_speed': 1, 'max_speed': 1}])
self.env.create_node(api=True, meta=meta)
node_db = self.env.nodes[0]
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node_db.id}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertItemsEqual(
map(lambda i: i['id'], resp.json_body),
map(lambda i: i.id, node_db.interfaces)
)
for nic in meta['interfaces']:
filtered_nics = filter(
lambda i: i['mac'] == nic['mac'],
resp.json_body
)
resp_nic = filtered_nics[0]
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
for conn in ('assigned_networks', ):
self.assertEqual(resp_nic[conn], [])
def test_nic_mac_swap(self):
mac_eth0 = '00:11:22:dd:ee:ff'
mac_eth1 = 'aa:bb:cc:33:44:55'
eth0 = {
'name': 'eth0',
'mac': mac_eth0,
'current_speed': 1,
'state': 'up'
}
eth1 = {
'name': 'eth1',
'mac': mac_eth1,
'current_speed': 1,
'state': 'up'
}
# prepare metadata with our interfaces
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [eth0, eth1])
# NOTE(prmtl) hack to have all mac set as we want
# crete_node() will generate random mac for 1st iface
# if we will not set it like that
node_mac = meta['interfaces'][0]['mac']
node = self.env.create_node(api=True, meta=meta, mac=node_mac)
self.env.create_cluster(api=True, nodes=[node['id']])
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
original_nic_info = resp.json
# swap macs, make them uppercase to check that we handle that correctly
eth0['mac'], eth1['mac'] = eth1['mac'].upper(), eth0['mac'].upper()
# update nodes with swapped macs
new_meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(new_meta, [eth0, eth1])
node_data = {'mac': node['mac'], 'meta': new_meta}
self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
# check that networks are assigned to the same interfaces
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
updated_nic_info = resp.json
for orig_iface in original_nic_info:
updated_iface = next(
iface for iface in updated_nic_info
if iface['mac'] == orig_iface['mac'])
self.assertEqual(
orig_iface['assigned_networks'],
orig_iface['assigned_networks'])
# nic names were swapped
self.assertNotEqual(orig_iface['name'], updated_iface['name'])
def test_NIC_updates_by_agent(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': '00:00:00:00:00:00', 'current_speed': 1,
'state': 'up'}])
node = self.env.create_node(api=True, meta=meta)
new_meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(new_meta, [
{'name': 'new_nic', 'mac': '00:00:00:00:00:00',
'current_speed': 10, 'max_speed': 10, 'state': 'down'}])
node_data = {'mac': node['mac'], 'meta': new_meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.json_body), 1)
resp_nic = resp.json_body[0]
nic = new_meta['interfaces'][0]
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
self.assertEqual(resp_nic['state'], nic['state'])
for conn in ('assigned_networks', ):
self.assertEqual(resp_nic[conn], [])
def test_NIC_adds_by_agent(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': '00:00:00:00:00:00', 'current_speed': 1,
'state': 'up'}])
node = self.env.create_node(api=True, meta=meta)
meta['interfaces'].append({
'name': 'new_nic', 'mac': '00:00:00:00:00:00'})
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.json_body), len(meta['interfaces']))
for nic in meta['interfaces']:
filtered_nics = filter(
lambda i: i['mac'] == nic['mac'],
resp.json_body
)
resp_nic = filtered_nics[0]
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'],
nic.get('current_speed'))
self.assertEqual(resp_nic['max_speed'], nic.get('max_speed'))
self.assertEqual(resp_nic['state'], nic.get('state'))
for conn in ('assigned_networks', ):
self.assertEqual(resp_nic[conn], [])
def test_ignore_NIC_id_in_meta(self):
fake_id = 'some_data'
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'id': fake_id, 'name': 'eth0', 'mac': '12345'}])
node = self.env.create_node(api=True, meta=meta)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertNotEquals(resp.json_body[0]['id'], fake_id)
def test_mac_address_should_be_in_lower_case(self):
meta = self.env.default_metadata()
new_mac = 'AA:BB:CC:DD:11:22'
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': new_mac}])
node = self.env.create_node(api=True, meta=meta)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertNotEquals(resp.json_body[0]['mac'], new_mac.lower())
def test_remove_assigned_interface(self):
def get_nodes():
resp = self.app.get(
reverse('NodeCollectionHandler',
kwargs={'cluster_id': self.env.clusters[0].id}),
headers=self.default_headers,
)
return resp.json_body
self.env.create(nodes_kwargs=[{'api': True}])
# check all possible handlers
for handler in ('NodeAgentHandler',
'NodeHandler',
'NodeCollectionHandler'):
            # fetch the node and check its availability
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
# remove all interfaces except admin one
adm_eth = self.env.network_manager._get_interface_by_network_name(
nodes_data[0]['id'], 'fuelweb_admin')
ifaces = list(nodes_data[0]['meta']['interfaces'])
nodes_data[0]['meta']['interfaces'] = \
[i for i in ifaces if i['name'] == adm_eth.name]
# prepare put request
data = {
'id': nodes_data[0]['id'],
'meta': nodes_data[0]['meta'],
}
if handler in ('NodeCollectionHandler', ):
data = [data]
if handler in ('NodeHandler', ):
endpoint = reverse(handler, kwargs={'obj_id': data['id']})
else:
endpoint = reverse(handler)
self.app.put(
endpoint,
jsonutils.dumps(data),
headers=self.default_headers,
)
# check the node is visible for api
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
self.assertEqual(len(nodes_data[0]['meta']['interfaces']), 1)
# restore removed interfaces
nodes_data[0]['meta']['interfaces'] = ifaces
self.app.put(
reverse(
'NodeAgentHandler',
),
jsonutils.dumps({
'id': nodes_data[0]['id'],
'meta': nodes_data[0]['meta'],
}),
headers=self.default_headers,
)
# check node availability
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
self.assertItemsEqual(nodes_data[0]['meta']['interfaces'], ifaces)
def test_change_mac_of_assigned_nics(self):
def get_nodes():
resp = self.app.get(
reverse('NodeCollectionHandler',
kwargs={'cluster_id': self.env.clusters[0].id}),
headers=self.default_headers,
)
return resp.json_body
meta = self.env.default_metadata()
meta["interfaces"] = [
{'name': 'eth0', 'mac': self.env.generate_random_mac()},
{'name': 'eth1', 'mac': self.env.generate_random_mac()},
{'name': 'eth2', 'mac': self.env.generate_random_mac()},
{'name': 'eth3', 'mac': self.env.generate_random_mac()},
{'name': 'eth4', 'mac': self.env.generate_random_mac()},
]
self.env.create(nodes_kwargs=[{'api': True, 'meta': meta}])
# check all possible handlers
for handler in ('NodeAgentHandler',
'NodeHandler',
'NodeCollectionHandler'):
            # fetch the node and check its availability
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
# change mac address of interfaces except admin one
adm_eth = self.env.network_manager._get_interface_by_network_name(
nodes_data[0]['id'], 'fuelweb_admin')
for iface in nodes_data[0]['meta']['interfaces']:
if iface['name'] != adm_eth.name:
iface['mac'] = self.env.generate_random_mac()
# prepare put request
data = {
'id': nodes_data[0]['id'],
'meta': nodes_data[0]['meta'],
}
if handler in ('NodeCollectionHandler', ):
data = [data]
if handler in ('NodeHandler', ):
endpoint = reverse(handler, kwargs={'obj_id': data['id']})
else:
endpoint = reverse(handler)
self.app.put(
endpoint,
jsonutils.dumps(data),
headers=self.default_headers,
)
# check the node is visible for api
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
| [
"[email protected]"
] | |
f7a5b18fb6689bbd853554059f87649d777be39e | d5214b1331c9dae59d95ba5b3aa3e9f449ad6695 | /qPloneSkinDump/tags/0.7.1/skin_template/Extensions/utils.py | 212ba116cffafeb43b484f18f88823b5577413df | [] | no_license | kroman0/products | 1661ee25a224c4b5f172f98110944f56136c77cf | f359bb64db22f468db5d1e411638790e94d535a2 | refs/heads/master | 2021-01-10T07:58:04.579234 | 2014-06-11T12:05:56 | 2014-06-11T12:05:56 | 52,677,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,200 | py | import os, sys, re, string
from StringIO import StringIO
from time import gmtime, strftime
from zLOG import LOG, INFO
from zExceptions import BadRequest
from App.config import getConfiguration
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.DirectoryView import addDirectoryViews
from Products.%(SKIN_PRODUCT_NAME)s.config import *
######################################################################
## IMPORTING UTILS ##
######################################################################
osp = os.path
ALLOWED_IMPORT_POLICY = ["only_new", "backup", "overwrite"]
INTRO_TO_INSTANCE = "< Started copying object files from Product import directory to Instance one."
SUMMARY_TO_INSTANCE = "> Finished copying."
INTRO_TO_ROOT = "< Started importing %%s file[s] with '%%s' policy."
SUMMARY_TO_ROOT = "> Finished importing."
INTRO_CLEAN = "< Started cleaning Instance import directory."
SUMMARY_CLEAN = "> Finished cleaning."
CREXP_INVALID_ID = re.compile('^The id \"(.*?)\" is invalid - it is already in use.$', re.DOTALL|re.IGNORECASE|re.MULTILINE)
################ CHECK IMPORTING ################
def checkIfImport():
""" Return if perform importing, based on checking
*zexp files in <SkinProduct>/import directory.
"""
instance_ipath, product_ipath = getImportedPathes()
product_ilist = [i for i in os.listdir(product_ipath) \
if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]
if product_ilist:
return 1
return 0
################ IMPORTING TO PLONE'S IMPORT DIR ################
def getImportedPathes():
""" Return Plone instance and Skin product import pathes."""
# Based on instance path, construct import pathes
cfg = getConfiguration()
instance_ipath = osp.join(cfg.instancehome, "import")
product_ipath = osp.join(cfg.instancehome, 'Products', PRODUCT_NAME, "import")
# Check presence of Product import directory
if not osp.isdir(product_ipath):
raise BadRequest, "Skin Product's import directory '%%s' - does not exist or is'nt direcory" %% product_ipath
# Check presence of Instance import directory
if not osp.isdir(instance_ipath):
raise BadRequest, "Instance import directory '%%s' - does not exist or isn't direcory" %% instance_ipath
return [instance_ipath, product_ipath]
def copyFile(src_dir, dst_dir, f_name):
""" Copy file from src_dir to dst_dir under original name."""
try:
src_file = open(osp.join(src_dir, f_name),"rb")
dst_file = open(osp.join(dst_dir, f_name),"wb")
dst_file.write(src_file.read())
dst_file.close()
src_file.close()
except Exception, e:
msg = "!!! In copying files from <%%s> dir to <%%s> dir exception occur. Details: %%s." %% (src_dir,dst_dir, str(e))
print >> import_out, msg
LOG('performImportToPortal',INFO,'copyFile', msg)
def moveToTemp(same_instance_files, instance_ipath, temp_dir_path):
""" Move samenamed files from Instanse's dir to temp dir."""
os.mkdir(temp_dir_path) # Create temp back_[date] dir
try:
[copyFile(instance_ipath, temp_dir_path, f_name) for f_name in same_instance_files]
[os.remove(osp.join(instance_ipath, f_name)) for f_name in same_instance_files]
except Exception, e:
msg = "!!! Exception occur during moving files from Instance's dir to temp dir. Detaile:%%s." %% str(e)
print >> import_out, msg
LOG('performImportToPortal',INFO,'moveToTemp', msg)
def copyToInstanceImport():
""" Perform copying imported files from <SkinProduct>/import dir
to Plone's instance import dir.
"""
print >> import_out, INTRO_TO_INSTANCE
instance_ipath, product_ipath = getImportedPathes()
# Compose temp dir back_[date] dir path in Instance import directory
temp_dir_id = "back_%%s" %% strftime("%%Y%%m%%d%%H%%M%%S", gmtime())
temp_dir_path = osp.join(instance_ipath, temp_dir_id)
# Get *.zexp files from Skin Product's import dir and Plone's instance import dir files
product_ilist = [i for i in os.listdir(product_ipath) \
if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]
instance_ilist = [i for i in os.listdir(instance_ipath) \
if osp.isfile(osp.join(instance_ipath,i)) and i.endswith('.zexp')]
    # Check for the presence of same-named files in the Instance and Product import directories.
same_instance_files = [f_name for f_name in instance_ilist if f_name in product_ilist]
if same_instance_files:
moveToTemp(same_instance_files, instance_ipath, temp_dir_path)
# Copy all *zexp files from Product's import dir to Instance's import dir
[copyFile(product_ipath, instance_ipath, f_name) for f_name in product_ilist]
print >> import_out, SUMMARY_TO_INSTANCE
return [instance_ipath, product_ipath, temp_dir_path, product_ilist]
################ IMPORTING TO PORTAL ################
def importObject(portal, file_name):
""" Work around old Zope bug in importing."""
try:
portal.manage_importObject(file_name)
except:
portal._p_jar = portal.Destination()._p_jar
portal.manage_importObject(file_name)
def makeBackUp(portal, portal_objects, temp_dir_path, obj_id):
""" Perfom backup same named portal objects in temp folder."""
# Get id of temp folder-object
durty_path,temp_id = osp.split(temp_dir_path)
if not temp_id:
durty_path,temp_id = osp.split(durty_path)
# Get temp folder-object
if temp_id not in portal_objects:
portal.invokeFactory('Folder', id=temp_id)
print >> import_out, "! Created '%%s' backup directory with same-ids " \
"objects from portal root." %% temp_id
temp_dir = getattr(portal, temp_id)
# Move object with same id to temp folder-object
get_transaction().commit(1)
obj = portal.manage_cutObjects(ids=[obj_id])
temp_dir.manage_pasteObjects(obj)
print >> import_out, "! '%%s' Object moved from portal root to '%%s' backup directory." %% (obj_id, temp_id)
def performImport(portal, temp_dir_path, file_name):
""" Importing an object to portal."""
portal_objects = portal.objectIds()
try:
portal.manage_importObject(file_name)
except Exception, e:
msg = str(e)
is_invalid_id = CREXP_INVALID_ID.match(msg)
if is_invalid_id:
obj_id = is_invalid_id.group(1)
if IMPORT_POLICY == "only_new":
msg = "! Object with '%%s' id was not importing because it's already exist " \
"in portal root." %% obj_id
print >> import_out, msg
elif IMPORT_POLICY == "backup":
makeBackUp(portal, portal_objects, temp_dir_path, obj_id)
importObject(portal, file_name)
elif IMPORT_POLICY == "overwrite":
portal.manage_delObjects(ids=[obj_id])
importObject(portal, file_name)
else:
# work around old Zope bug in importing
portal._p_jar = portal.Destination()._p_jar
portal.manage_importObject(file_name)
def importToPortalRoot(portal, product_file_names, temp_dir_path):
""" Import all objects from *zexp files to portal root (based on IMPORT_POLICY)."""
if not IMPORT_POLICY in ALLOWED_IMPORT_POLICY:
raise Exception("%%s - wrong import policy in '%%s/config.py' file. Must be one of the %%s" \
%% (IMPORT_POLICY, PRODUCT_NAME, ALLOWED_IMPORT_POLICY) )
print >> import_out, INTRO_TO_ROOT %% (product_file_names, IMPORT_POLICY)
for file_name in product_file_names:
try:
performImport(portal, temp_dir_path, file_name)
except Exception, error:
            msg = '!!! An exception occurred while importing under the "%%s" policy: %%s.' %% (IMPORT_POLICY, str(error))
print >> import_out, msg
LOG('performImportToPortal',INFO,'importToPortalRoot', msg)
print >> import_out, SUMMARY_TO_ROOT
################ CLEANING PLONE'S IMPORT DIR ################
def cleanInstanceImport(instance_ipath, product_file_names, temp_dir_path):
""" Cleaning Plone's import dir."""
print >> import_out, INTRO_CLEAN
# Erase all copied *zexp files from Instance's import dir
for f_name in product_file_names:
f_path = osp.join(instance_ipath, f_name)
if osp.exists(f_path) and osp.isfile(f_path):
os.remove(f_path)
else:
msg = '! "%%s" file was not deleted from "%%s" import directory.' %%\
(f_name, osp.join(instance_ipath))
print >> import_out, msg
LOG('performImportToPortal',INFO,'cleanInstanceImport', msg)
# Move all files from temp back_[date] dir to Instance's import dir
if osp.exists(temp_dir_path) and osp.isdir(temp_dir_path):
f_names = os.listdir(temp_dir_path)
try:
[copyFile(temp_dir_path, instance_ipath, f_name) for f_name in f_names]
[os.remove(osp.join(temp_dir_path, f_name)) for f_name in f_names]
# Erase temp back_[date] dir
os.rmdir(temp_dir_path)
except Exception, e:
msg = "!!! In moving files from temp dir to Instance's import dir exception occur."
print >> import_out, msg
LOG('performImportToPortal',INFO,'moveFromTempToImport', msg)
print >> import_out, SUMMARY_CLEAN
################ MAIN ################
def performImportToPortal(portal):
""" Import objects from Skin Product to Portal root."""
globals()['import_out'] = StringIO()
instance_ipath, product_ipath, temp_dir_path, product_file_names = copyToInstanceImport()
if product_file_names:
importToPortalRoot(portal, product_file_names, temp_dir_path)
cleanInstanceImport(instance_ipath, product_file_names, temp_dir_path)
else:
print >> import_out, "!!! Failure importing: there is no file for importing to be found."
result = import_out
del globals()['import_out']
return result.getvalue()
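# Minimal usage sketch (assumption: invoked from an Install script or an
# External Method that already holds the portal object; both helpers are
# defined above and the returned string is the import log):
#
#     if checkIfImport():
#         report = performImportToPortal(portal)
#         print >> out, report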
######################################################################
## INSTALLATION/UNINSTALLATION UTILS ##
######################################################################
CSS_REG_PROPS = ['id', 'expression', 'enabled', 'cookable', 'cacheable' \
,'media', 'rel', 'title', 'rendering', 'compression']
JS_REG_PROPS = ['id', 'expression', 'enabled', 'cookable', 'cacheable' \
,'inline', 'compression']
def installSkin(portal, pp_up, out):
    # Check for the presence of SKIN_NAME in the portal_skins directory view or among the Skin Names
skinsTool = getToolByName(portal, 'portal_skins')
    # Get a unique product_skin_name and remember it in case it differs from SKIN_NAME.
product_skin_name = SKIN_NAME
skin_names = skinsTool.getSkinSelections()
if product_skin_name in skin_names:
idx = 0
while product_skin_name in skin_names:
product_skin_name = SKIN_NAME + str(idx)
idx += 1
addProperty(pp_up, 'q_actual_skin_name', product_skin_name, 'string', out)
# Add directory views
layer_skin_name = string.lower(SKIN_NAME)
addDirectoryViews(skinsTool, 'skins', GLOBALS)
print >> out, "- added '%%s' directory views to portal_skins." %% layer_skin_name
    # Get the default skin and remember it so it can be restored on uninstalling
default_skin = skinsTool.getDefaultSkin()
addProperty(pp_up, 'q_default_skin', default_skin, 'string', out)
# Building list of layers for NEW SKIN
base_path = skinsTool.getSkinPath(BASE_SKIN_NAME)
new_path = map( string.strip, string.split(base_path,',') )
if layer_skin_name in new_path :
print >> out, "- %%s layer already present in '%%s' skin." %% (layer_skin_name, BASE_SKIN_NAME)
# Remove layer_skin_name from current position.
del new_path[new_path.index(layer_skin_name)]
# Add layer_skin_name just after 'custom' position
try:
new_path.insert(new_path.index('custom')+1, layer_skin_name)
except ValueError:
new_path.append(layer_skin_name)
new_path = string.join(new_path, ', ')
    # Add the NEW Skin and set it as default
    skinsTool.addSkinSelection(product_skin_name, new_path, make_default=1)
    print >> out, "Added %%s skin, based on %%s, and set it as default." %% (product_skin_name, BASE_SKIN_NAME)
def uninstallSkin(skinsTool, actual_skin_name, initial_skin):
# Get 'portal_skins' object and list available skin names
# And remove SKIN_NAME from available skins, if it present
skin_names = skinsTool.getSkinSelections()
if actual_skin_name in skin_names :
skinsTool.manage_skinLayers(chosen=(actual_skin_name,), del_skin=1, REQUEST=None)
skin_names.remove(actual_skin_name)
# Remove product skin directory from skins tool
# AND Remove skin-product layer from available skins
skin_layer = SKIN_NAME.lower()
if skin_layer in skinsTool.objectIds():
skinsTool.manage_delObjects(skin_layer)
for skin_name in skin_names:
path = skinsTool.getSkinPath(skin_name)
path = [i.strip() for i in path.split(',')]
if skin_layer in path:
path.remove(skin_layer)
path = ','.join(path)
skinsTool.addSkinSelection(skin_name, path)
# If current default skin == actual_skin_name
# Set default skin in initial one (if initial skin still exist)
# or in 1st from available skin names list.
current_default_skin = skinsTool.getDefaultSkin()
if current_default_skin == actual_skin_name:
if initial_skin in skin_names :
skinsTool.manage_properties(default_skin=initial_skin, REQUEST=None)
elif len(skin_names)>0 :
skinsTool.manage_properties(default_skin=skin_names[0], REQUEST=None)
def addProperty(p_sheet, p_id, p_value, p_type, out):
if p_sheet.hasProperty(p_id):
p_sheet._delProperty(p_id)
p_sheet._setProperty(p_id, p_value, p_type)
print >> out, "... added %%s PropertySheet to %%s." %% (p_id, p_sheet.getId())
def getResourceProperties(obj, prop_list, dflt=''):
""" Return list of 2 items list-[property name, property value]."""
properties=[]
for prop in prop_list:
accessor = getattr(obj, 'get%%s' %% prop.capitalize(), None)
if accessor:
properties.append([prop, accessor() or dflt])
return properties
def registerResource(pp_up, portal_res, resRegisterFunction, out \
,RESOURCE_SKIN_LIST, SKIN_RES_REGDATA, UP_PROPERTY, RES_REG_PROPS):
""" Register resources in portal's registry, remember existant settings."""
# Get original registered resources
portal_res_srings = []
for r in portal_res.getResources():
portal_res_srings.append(";".join(['%%s::%%s'%%(r[0],str(r[1])) \
for r in getResourceProperties(r, RES_REG_PROPS)]))
addProperty(pp_up, UP_PROPERTY, portal_res_srings, 'lines', out)
# Tune Resource registry according to new skin needs
    unexistent = []  # list of default resources that are present
    # in the Skin product BUT absent in the portal
portal_res_ids = portal_res.getResourceIds()
for res_dict in SKIN_RES_REGDATA:
if res_dict['id'] not in portal_res_ids:
            # Interestingly, the Resource Registry allows registering a resource that does not exist yet - use this
resRegisterFunction(**res_dict)
if res_dict['id'] not in RESOURCE_SKIN_LIST:
unexistent.append(res_dict['id'])
else:
pos = portal_res.getResourcePosition(res_dict['id'])
portal_res.unregisterResource(res_dict['id'])
resRegisterFunction(**res_dict)
portal_res.moveResource(res_dict['id'], pos)
if unexistent:
print >> out, "!!! - BAD: your Resource Regestry have'nt %%s resource(s), which may lead to some problems." %% unexistent
def uninstallResource(portal_res, original_res_list, RESOURCE_SKIN_LIST, resRegisterFunction):
    # Prepare Resource Registry data for restoring the original state
original_res_regestry = {}
for rec in original_res_list:
resource = {}
[resource.update({prop.split('::')[0]:prop.split('::')[1]}) for prop in rec.split(";")]
original_res_regestry[resource.pop('id')] = resource
# Work up actual Resource Registry
res_dict = portal_res.getResourcesDict()
for res_id in res_dict.keys():
        # Remove the Skin product's resources from the Resource Registry
if res_id in RESOURCE_SKIN_LIST \
and res_id not in original_res_regestry.keys():
portal_res.unregisterResource(res_id)
continue
        # Restore the 'enabled' property of the Registry's resources to its original state
if original_res_regestry.has_key(res_id):
act_Enabled_state = res_dict[res_id].getEnabled()
orig_Enabled_state = original_res_regestry[res_id]['enabled']
if act_Enabled_state != orig_Enabled_state:
pos = portal_res.getResourcePosition(res_id)
resource = res_dict[res_id]
res = original_res_regestry[res_id]
portal_res.unregisterResource(res_id)
resRegisterFunction(res_id, **res)
portal_res.moveResource(res_id, pos)
def customizeSlots(portal, pp_up, out):
# Get original Site's column lists
orig_left_slots = left_column = list(portal.left_slots)
orig_right_slots = right_column = list(portal.right_slots)
# Save original Site's LEFT and RIGHT slots
addProperty(pp_up, 'q_left_slots', orig_left_slots, 'lines', out)
addProperty(pp_up, 'q_right_slots', orig_right_slots, 'lines', out)
    # blend_with_site - add to the portal's slots only the new ones from the skin product
    # blend_with_skin - form the portal's slots as follows:
    #                   first add the skin product's slots, then the new ones from the portal
    # replace - form the portal's slots from the skin product's slot list only
if SLOT_FORMING == "blend_with_skin":
left_column, right_column = formSlotsColumn(LEFT_SLOTS, RIGHT_SLOTS,
orig_left_slots, orig_right_slots, MAIN_COLUMN)
elif SLOT_FORMING == "blend_with_site":
left_column, right_column = formSlotsColumn(orig_left_slots, orig_right_slots,
LEFT_SLOTS, RIGHT_SLOTS, MAIN_COLUMN )
elif SLOT_FORMING == "replace":
left_column, right_column = formSlotsColumn(LEFT_SLOTS, RIGHT_SLOTS, [], [], MAIN_COLUMN)
# REPLACE SITE's column slots
portal.left_slots = tuple(left_column)
portal.right_slots = tuple(right_column)
print >> out, "Complited portal slots customization ..."
# main_column ("left" / "right" / "both") means which MAIN column is favoured
def formSlotsColumn(main_left, main_right, slave_left=[], slave_right=[], main_column="both"):
result_left = main_left
result_right = main_right
if main_column == "left":
        # 1) APPEND to MAIN_LEFT the slots from slave_left that are *new for the main left column*
        # 2) APPEND to MAIN_RIGHT the slots from slave_right that are *new for both main columns*
        # 3) REMOVE from MAIN_RIGHT the slots that are *doubled* in MAIN_LEFT
[result_left.append(slot) for slot in slave_left if slot not in result_left]
[result_right.append(slot) for slot in slave_right \
if slot not in result_right and slot not in result_left]
[result_right.remove(slot) for slot in result_left if slot in result_right]
elif main_column == "right":
        # 1) APPEND to MAIN_RIGHT the slots from slave_right that are *new for the main right column*
        # 2) APPEND to MAIN_LEFT the slots from slave_left that are *new for both main columns*
        # 3) REMOVE from MAIN_LEFT the slots that are *doubled* in MAIN_RIGHT
[result_right.append(slot) for slot in slave_right if slot not in result_right]
[result_left.append(slot) for slot in slave_left \
if slot not in result_left and slot not in result_right]
[result_left.remove(slot) for slot in result_right if slot in result_left]
elif main_column == "both":
        # 1) APPEND to MAIN_LEFT the slots from slave_left that are *new for both main columns*
        # 2) APPEND to MAIN_RIGHT the slots from slave_right that are *new for both main columns*
[result_left.append(slot) for slot in slave_left \
if slot not in result_left and slot not in result_right]
[result_right.append(slot) for slot in slave_right \
if slot not in result_right and slot not in result_left]
return [result_left, result_right]
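# Worked example (hypothetical slot names) for main_column="both":
#
#     formSlotsColumn(['here_slot'], ['news_slot'],
#                     ['news_slot', 'cal_slot'], ['login_slot'], "both")
#
# returns [['here_slot', 'cal_slot'], ['news_slot', 'login_slot']]; the
# slave 'news_slot' is skipped because it already sits in a main column.
# Note that the returned lists are the main_left/main_right arguments
# themselves (the function extends them in place), not copies.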
def getProperty(pp, ps, id, default=[]):
""" Get property from portal_properties/[property_sheet]"""
res = default
if ps in pp.objectIds() and pp[ps].hasProperty(id):
res = pp[ps].getProperty(id, default)
return res
| [
"mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946"
] | mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946 |
52a3f85e87f9b45fb9ac69bf0e8ce6265b792311 | 933376c11498a6567da8d7eb7d2675100895c3ba | /pyzoo/zoo/tfpark/text/estimator/bert_classifier.py | 1e804709fef7c112b4e4207be94bb878986e0527 | [
"Apache-2.0"
] | permissive | intel-analytics/analytics-zoo | 320a461765f86d41dd456b598b1cf1d51d57f4c4 | 7cc3e2849057d6429d03b1af0db13caae57960a5 | refs/heads/master | 2023-08-13T20:47:58.621714 | 2023-07-06T00:49:11 | 2023-07-06T00:49:11 | 90,328,920 | 3,104 | 996 | Apache-2.0 | 2023-09-06T01:51:18 | 2017-05-05T02:27:30 | Jupyter Notebook | UTF-8 | Python | false | false | 4,312 | py | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.tfpark.text.estimator import *
def make_bert_classifier_model_fn(optimizer):
def _bert_classifier_model_fn(features, labels, mode, params):
"""
Model function for BERTClassifier.
:param features: Dict of feature tensors. Must include the key "input_ids".
:param labels: Label tensor for training.
:param mode: 'train', 'eval' or 'infer'.
:param params: Must include the key "num_classes".
:return: tf.estimator.EstimatorSpec.
"""
import tensorflow as tf
from zoo.tfpark import ZooOptimizer
output_layer = bert_model(features, labels, mode, params).get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [params["num_classes"], hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [params["num_classes"]], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if mode == tf.estimator.ModeKeys.TRAIN:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=probabilities)
else:
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=params["num_classes"], dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode=mode, predictions=probabilities,
loss=loss)
else:
train_op = ZooOptimizer(optimizer).minimize(loss)
return tf.estimator.EstimatorSpec(mode=mode, train_op=train_op, loss=loss)
return _bert_classifier_model_fn
class BERTClassifier(BERTBaseEstimator):
"""
A pre-built TFEstimator that takes the hidden state of the first token of BERT
to do classification.
:param num_classes: Positive int. The number of classes to be classified.
:param bert_config_file: The path to the json file for BERT configurations.
:param init_checkpoint: The path to the initial checkpoint of the pre-trained BERT model if any.
Default is None.
:param use_one_hot_embeddings: Boolean. Whether to use one-hot for word embeddings.
Default is False.
:param optimizer: The optimizer used to train the estimator. It should be an instance of
tf.train.Optimizer.
Default is None if no training is involved.
:param model_dir: The output directory for model checkpoints to be written if any.
Default is None.
"""
def __init__(self, num_classes, bert_config_file, init_checkpoint=None,
use_one_hot_embeddings=False, optimizer=None, model_dir=None):
super(BERTClassifier, self).__init__(
model_fn=make_bert_classifier_model_fn(optimizer),
bert_config_file=bert_config_file,
init_checkpoint=init_checkpoint,
use_one_hot_embeddings=use_one_hot_embeddings,
model_dir=model_dir,
num_classes=num_classes)
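# Minimal usage sketch (the paths and hyper-parameters below are
# placeholders, not values shipped with the library):
#
#   import tensorflow as tf
#
#   classifier = BERTClassifier(
#       num_classes=2,
#       bert_config_file="/path/to/bert_config.json",       # hypothetical path
#       init_checkpoint="/path/to/bert_model.ckpt",         # hypothetical path
#       optimizer=tf.train.AdamOptimizer(learning_rate=2e-5),
#       model_dir="/tmp/bert_classifier")                   # hypothetical path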
| [
"[email protected]"
] | |
35034f2bbcde81a629eb94fa9af1209164b3add8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_174/ch37_2020_09_25_14_11_43_800333.py | 148f46760e910d9c6b64d5146199139abafcb9df | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | x=True
while x:
resposta=input("Qual a palavra?")
if resposta=="desisto"
print("Você acertou a senha!") | [
"[email protected]"
] | |
73f56c7dd6bb05eaef9d510dce43223ccdce3a6e | 25a362a695a33a2c5766d56974a541888ac6a783 | /permeability/scripts/Analyze.py | b73d1fc9de7acd6904114fd27edc501922c0ece7 | [
"MIT"
] | permissive | ahy3nz/permeability | 86f5137c73cc8df8755e4c560f297df6051e5b3a | 809b92f15c0a8edb775f95910511079429c5e741 | refs/heads/master | 2020-12-14T08:48:00.434712 | 2018-02-27T22:53:25 | 2018-02-27T22:53:25 | 95,495,251 | 0 | 0 | null | 2018-02-15T23:04:51 | 2017-06-26T22:35:52 | Python | UTF-8 | Python | false | false | 3,292 | py | import os
import permeability as prm
import matplotlib.pyplot as plt
import pickle as pickle
from os import system
import pdb
import numpy as np
import mdtraj as mdt
#data_dir = '/Users/rmhartkamp/Dropbox/PostDoc_2_Vanderbilt/Simulation/Permeability/DSPC_C12OH_3_1'
#data_dir = '/raid6/homes/ahy3nz/Trajectories/Data/11_DSPC_C18OH/DSPC-50_alc18-50_5-4a'
data_dir = os.getcwd()
n_sweeps = 26
#n_sweeps = 30
preamble = "folder"
prm.analyze_sweeps(data_dir, timestep=1.0, verbosity=2, directory_prefix='sweep', n_sweeps=n_sweeps, correlation_length=300)
#prm.analyze_sweeps(data_dir, timestep=1000.0, verbosity=2, directory_prefix='sweep', n_sweeps=n_sweeps)
#forcetime = prm.force_timeseries(data_dir, timestep=2.0, n_windows=40, start_window=15, directory_prefix='sweep')
#prm.plot_timeseries(forcetime['time'], forcetime['forces'])
output = prm.analyze_force_acf_data(data_dir, 305.0, timestep=1, verbosity=2, directory_prefix='sweep',n_sweeps=n_sweeps, kB=1.987e-3)
pickle.dump(output, open('{}/output.p'.format(preamble), 'wb'))
#output = pickle.load(open('output.p', 'rb'))
#pdb.set_trace()
#system('rm *.pdf *.png')
prm.plot_forces(output['z'], output['forces'], fig_filename='{}/forces.pdf'.format(preamble),sweep_alpha=0.2)
prm.plot_free_energy_z(output['z'], output['dG'], fig_filename='{}/delta_G.pdf'.format(preamble))
prm.plot_force_acfs_time(output['time'], output['facf_windows'], fig_filename="{}/force_acf.png".format(preamble), normalize=True)
prm.plot_int_acfs_time(output['time'], output['int_facf_windows'], fig_filename="{}/int-acf.png".format(preamble))
prm.plot_symmetrized_free_energy(output['z'], output['dG_sym'],
output['dG_sym_err'],savefig=True, fig_filename="{}/delG-sym.pdf".format(preamble))
prm.plot_sym_diffusion_coefficient_z(output['z'], output['d_z_sym'],
output['d_z_sym_err'],savefig=True, fig_filename="{}/d-sym_z.pdf".format(preamble))
prm.plot_resistance_z(output['z'], output['R_z'], output['R_z_err'], savefig=True, fig_filename="{}/R_z.pdf".format(preamble))
prm.plot_sym_exp_free_energy(output['z'], output['dG_sym'], output['dG_sym_err'], output['d_z_sym'],
output['d_z_sym_err'], output['R_z'], output['R_z_err'], 305,
fig_filename="{}/expdelG-sym.pdf".format(preamble))
print('Permeability (cm/sec): {} ({})'.format(output['permeability'], output['perm_err']))
#prm.plot_forces(output['z'], output['forces'], fig_filename='forces.pdf',sweep_alpha=0.2)
#prm.plot_free_energy_z(output['z'], output['dG'], fig_filename='delta_G.pdf')
#prm.plot_force_acfs_time(output['time'], output['facf_windows'], normalize=True)
#prm.plot_int_acfs_time(output['time'], output['int_facf_windows'])
#prm.plot_symmetrized_free_energy(output['z'], output['dG_sym'],
# output['dG_sym_err'],savefig=True)
#prm.plot_sym_diffusion_coefficient_z(output['z'], output['d_z_sym'],
# output['d_z_sym_err'],savefig=True)
#prm.plot_resistance_z(output['z'], output['R_z'], output['R_z_err'], savefig=True)
#prm.plot_sym_exp_free_energy(output['z'], output['dG_sym'], output['dG_sym_err'], output['d_z_sym'],
# output['d_z_sym_err'], output['R_z'], output['R_z_err'], 305)
#print('Permeability (cm/sec): {} ({})'.format(output['permeability'], output['perm_err']))
##system('open -a preview *.pdf *.png')
| [
"[email protected]"
] | |
6aaba650193d59b002abe106baec671a25cc7974 | 99f43f4591f63d0c57cd07f07af28c0b554b8e90 | /python/beckjun/백준_2169_로봇 조종하기_백트래킹.py | 566123bd84b9a58d89a268e73e253affbd74684f | [] | no_license | SINHOLEE/Algorithm | 049fa139f89234dd626348c753d97484fab811a7 | 5f39d45e215c079862871636d8e0306d6c304f7e | refs/heads/master | 2023-04-13T18:55:11.499413 | 2023-04-10T06:21:29 | 2023-04-10T06:21:29 | 199,813,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | from collections import deque
n, m = map(int, input().split())
mat = [list(map(int, input().split())) for _ in range(n)]
di = (0, 0, 1)
dj = (1, -1, 0)  # direction k: 0 = right, 1 = left, 2 = down
# dp[i][j][k] = best score reaching cell (i, j) with the last move in direction k
dp = [[[-1000000001] * 3 for _ in range(m)] for _ in range(n)]
q = deque([(mat[0][0], 0, 0, 0)])
dp[0][0][0] = max(dp[0][0][0], mat[0][0])
while q:
    cnt, x, y, pre_d = q.popleft()
    for k in range(3):
        newX, newY = x + di[k], y + dj[k]
        # right (0) and left (1) are opposites (k ^ 1 swaps them), so the
        # robot never immediately reverses a horizontal move
        if pre_d ^ 1 == k:
            continue
        if not (0 <= newX < n and 0 <= newY < m):
            continue
        if dp[newX][newY][k] < cnt + mat[newX][newY]:
            dp[newX][newY][k] = cnt + mat[newX][newY]
            q.append((cnt + mat[newX][newY], newX, newY, k))
# best score over the three arrival directions at the bottom-right cell
print(max(dp[n-1][m-1])) | [
"[email protected]"
] | |
fb05e50ae82b67f2faf5468a91a946c9c47ed1ab | 193b261c4e5a893a5798ade17dea172af0be8dd4 | /tools/code_coverage/package/tool/parser/llvm_coverage_segment.py | 17d7c18975ff94a6a24b56b5638cfea90b9a0633 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | ahirner/pytorch | 616291823ddab5def3d35d5bd5693d62789bd710 | fb620a27d08fc5ad00b386505e23e2a51f02366b | refs/heads/master | 2021-07-25T14:19:44.053789 | 2021-06-10T19:25:58 | 2021-06-10T19:29:57 | 153,104,181 | 1 | 2 | NOASSERTION | 2021-06-12T06:35:46 | 2018-10-15T11:51:36 | C++ | UTF-8 | Python | false | false | 1,975 | py | from typing import List, NamedTuple, Optional, Tuple
class LlvmCoverageSegment(NamedTuple):
line: int
col: int
segment_count: int
has_count: int
is_region_entry: int
is_gap_entry: Optional[int]
@property
def has_coverage(self) -> bool:
return self.segment_count > 0
@property
def is_executable(self) -> bool:
return self.has_count > 0
def get_coverage(
self, prev_segment: "LlvmCoverageSegment"
) -> Tuple[List[int], List[int]]:
# Code adapted from testpilot.testinfra.runners.gtestcoveragerunner.py
if not prev_segment.is_executable:
return [], []
# this segment ends at the line if col == 1
# (so segment effectively ends on the line) and
# line+1 if col is > 1 (so it touches at least some part of last line).
end_of_segment = self.line if self.col == 1 else self.line + 1
lines_range = list(range(prev_segment.line, end_of_segment))
return (lines_range, []) if prev_segment.has_coverage else ([], lines_range)
def parse_segments(raw_segments: List[List[int]]) -> List[LlvmCoverageSegment]:
"""
    Creates LlvmCoverageSegments from a list of lists in the llvm export json.
    Each segment is represented by a 5- or 6-element array.
"""
ret: List[LlvmCoverageSegment] = []
for raw_segment in raw_segments:
        assert (
            len(raw_segment) == 5 or len(raw_segment) == 6
        ), ("list is not compatible with llvm-cov export:"
            " expected to have 5 or 6 elements")
if len(raw_segment) == 5:
ret.append(
LlvmCoverageSegment(
raw_segment[0],
raw_segment[1],
raw_segment[2],
raw_segment[3],
raw_segment[4],
None,
)
)
else:
ret.append(LlvmCoverageSegment(*raw_segment))
return ret
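# Example (sketch) of feeding raw llvm-cov export segments through the
# helpers above; the numbers are made up but follow the documented
# [line, col, segment_count, has_count, is_region_entry] layout:
#
#   segments = parse_segments([[1, 1, 3, 1, 1], [4, 1, 0, 0, 0]])
#   covered, uncovered = segments[1].get_coverage(segments[0])
#   # covered == [1, 2, 3], uncovered == [] -- lines 1-3 ran 3 times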
| [
"[email protected]"
] | |
ac86c3b861f846bc1a6d184ec38203ecdc991814 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L46/46-32_wat_20Abox/set_5.py | a41d48053aff78204b6fa52c4c6467f61d81071f | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import os
dir = '/mnt/scratch/songlin3/run/mcl1/L46/wat_20Abox/ti_one-step/46_32/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_5.in'
temp_pbs = filesdir + 'temp_5.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_5.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_5.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
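#NOTE (sketch): the sed calls above stamp each lambda value into copies of
#the shared templates, so a template line such as "clambda = XXX"
#(hypothetical contents; the template files are not shown here) becomes
#"clambda = 0.00922" in the first window's prod input and PBS script.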
| [
"[email protected]"
] | |
fd9f613fe8dd68c55baa2d858595c5ba3d42bf88 | 8576b91843e77d5ffefe973280fe62e9da3e27b3 | /manipulation/regrasp/regriptppass.py | 083a666645288e3ebac831cfe50c104243e80176 | [] | no_license | MiaoLi/pyhiro | aa335164bb3482d6f070e0c767dad5335585702a | 77e42ce2247a969f91e7cbae48e5e904459785cc | refs/heads/master | 2021-01-23T06:15:17.315529 | 2017-04-11T23:25:15 | 2017-04-11T23:25:15 | 93,016,532 | 1 | 0 | null | 2017-06-01T04:30:31 | 2017-06-01T04:30:31 | null | UTF-8 | Python | false | false | 33,527 | py | #!/usr/bin/python
import os
import itertools
import MySQLdb as mdb
import numpy as np
from manipulation.grip.robotiq85 import rtq85nm
from panda3d.bullet import BulletWorld
from panda3d.core import *
from shapely.geometry import LinearRing
from shapely.geometry import Point
from shapely.geometry import Polygon
import pandaplotutils.pandactrl as pandactrl
import pandaplotutils.pandageom as pandageom
import trimesh
from pandaplotutils import pandageom as pg
from utils import collisiondetection as cd
from utils import dbcvt as dc
from utils import robotmath as rm
import matplotlib.pyplot as plt
from matplotlib import collections as mc
from mpl_toolkits.mplot3d import art3d as mc3d
from operator import add
from robotsim.nextage import nextage
from robotsim.hrp5 import hrp5
from database import dbaccess as db
import networkx as nx
import math
import random
import floatingposes
from manipulation.assembly.asstwoobj import TwoObjAss as Toass
class GraphTpp(object):
def __init__(self, objpath, robot, handpkg, gdb, armname):
self.objtrimesh=trimesh.load_mesh(objpath)
# for dbaccess
self.dbobjname = os.path.splitext(os.path.basename(objpath))[0]
# regg = regrip graph
self.regg = nx.Graph()
self.gdb = gdb
self.robot = robot
self.handpkg = handpkg
# load retraction distances
self.rethandx, self.retworldz, self.retworlda, self.worldz = self.gdb.loadIKRet()
# worlda is default for the general grasps on table top
# for assembly at start and goal, worlda is computed by assembly planner
self.worlda = Vec3(0,0,1)
self.globalgripids = []
self.fttpsids = []
self.nfttps = 0
self.gnodesplotpos = {}
self.gdb = gdb
self.robot = robot
self.handpkg = handpkg
self.__loadGripsToBuildGraph(armname)
def __loadGripsToBuildGraph(self, armname = "rgt"):
"""
        load tabletop grips
        retraction distances are also loaded from the database
        :param robot: a robot defined in robotsim.hrp5 or robotsim.nextage
:param gdb: an object of the database.GraspDB class
:param idarm: value = 1 "lft" or 2 "rgt", which arm to use
:return:
author: weiwei
date: 20170112
"""
# load idarm
idarm = self.gdb.loadIdArm(armname)
# get the global grip ids
# and prepare the global edges
# for each globalgripid, find all its tabletopids (pertaining to placements)
globalidsedges = {}
sql = "SELECT idfreeairgrip FROM freeairgrip,object WHERE freeairgrip.idobject=object.idobject AND \
object.name LIKE '%s'" % self.dbobjname
result = self.gdb.execute(sql)
if len(result) == 0:
raise ValueError("Plan freeairgrip first!")
for ggid in result:
globalidsedges[str(ggid[0])] = []
self.globalgripids.append(ggid[0])
sql = "SELECT tabletopplacements.idtabletopplacements, angle.value, \
tabletopplacements.idfreetabletopplacement, tabletopplacements.tabletopposition, \
tabletopplacements.rotmat FROM \
tabletopplacements,freetabletopplacement,angle,object WHERE \
tabletopplacements.idangle=angle.idangle AND \
tabletopplacements.idfreetabletopplacement=freetabletopplacement.idfreetabletopplacement AND \
freetabletopplacement.idobject=object.idobject AND \
object.name LIKE '%s' AND angle.value IN (0.0, 45.0, 90.0, 135.0, 180.0, 225.0, 270.0, 315.0)" \
% self.dbobjname
result = self.gdb.execute(sql)
if len(result) != 0:
tpsrows = np.array(result)
# nubmer of discreted rotation
self.angles = list(set(map(float, tpsrows[:,1])))
# for plotting
self.fttpsids = list(set(map(int, tpsrows[:,2])))
self.nfttps = len(self.fttpsids)
idrobot = self.gdb.loadIdRobot(self.robot)
for i, idtps in enumerate(tpsrows[:,0]):
sql = "SELECT tabletopgrips.idtabletopgrips, tabletopgrips.contactpnt0, tabletopgrips.contactpnt1, \
tabletopgrips.rotmat, tabletopgrips.jawwidth, tabletopgrips.idfreeairgrip \
FROM tabletopgrips,ik,freeairgrip,hand WHERE tabletopgrips.idfreeairgrip = freeairgrip.idfreeairgrip AND \
freeairgrip.idhand = hand.idhand AND\
tabletopgrips.idtabletopgrips=ik.idtabletopgrips AND \
tabletopgrips.idtabletopplacements = %d AND ik.idrobot=%d AND \
ik.feasibility='True' AND ik.feasibility_handx='True' AND ik.feasibility_handxworldz='True' \
AND ik.feasibility_worlda='True' AND ik.feasibility_worldaworldz='True' AND ik.idarm = %d \
AND hand.name LIKE '%s'" \
% (int(idtps), idrobot, idarm, self.handpkg.getHandName())
resultttgs = self.gdb.execute(sql)
if len(resultttgs)==0:
continue
localidedges = []
for ttgsrow in resultttgs:
ttgsid = int(ttgsrow[0])
ttgscct0 = dc.strToV3(ttgsrow[1])
ttgscct1 = dc.strToV3(ttgsrow[2])
ttgsrotmat = dc.strToMat4(ttgsrow[3])
ttgsjawwidth = float(ttgsrow[4])
ttgsidfreeair = int(ttgsrow[5])
ttgsfgrcenter = (ttgscct0+ttgscct1)/2
handx = ttgsrotmat.getRow3(0)
ttgsfgrcenterhandx = ttgsfgrcenter + handx*self.rethandx
ttgsfgrcenterhandxworldz = ttgsfgrcenterhandx + self.worldz*self.retworldz
ttgsfgrcenterworlda = ttgsfgrcenter + self.worlda*self.retworlda
ttgsfgrcenterworldaworldz = ttgsfgrcenterworlda+ self.worldz*self.retworldz
ttgsfgrcenternp = pg.v3ToNp(ttgsfgrcenter)
ttgsfgrcenternp_handx = pg.v3ToNp(ttgsfgrcenterhandx)
ttgsfgrcenternp_handxworldz = pg.v3ToNp(ttgsfgrcenterhandxworldz)
ttgsfgrcenternp_worlda = pg.v3ToNp(ttgsfgrcenterworlda)
ttgsfgrcenternp_worldaworldz = pg.v3ToNp(ttgsfgrcenterworldaworldz)
ttgsrotmat3np = pg.mat3ToNp(ttgsrotmat.getUpper3())
objrotmat4 = dc.strToMat4(tpsrows[:,4][i])
objrotmat4worlda = Mat4(objrotmat4)
objrotmat4worlda.setRow(3, objrotmat4.getRow3(3)+self.worlda*self.retworlda)
objrotmat4worldaworldz = Mat4(objrotmat4worlda)
objrotmat4worldaworldz.setRow(3, objrotmat4worlda.getRow3(3)+self.worldz*self.retworldz)
self.regg.add_node(armname+str(ttgsid), fgrcenter=ttgsfgrcenternp,
fgrcenterhandx = ttgsfgrcenternp_handx,
fgrcenterhandxworldz = ttgsfgrcenternp_handxworldz,
fgrcenterworlda = ttgsfgrcenternp_worlda,
fgrcenterworldaworldz = ttgsfgrcenternp_worldaworldz,
jawwidth=ttgsjawwidth, hndrotmat3np=ttgsrotmat3np,
globalgripid = ttgsidfreeair, freetabletopplacementid = int(tpsrows[:,2][i]),
tabletopplacementrotmat = objrotmat4,
tabletopplacementrotmathandx = objrotmat4,
tabletopplacementrotmathandxworldz = objrotmat4,
tabletopplacementrotmatworlda = objrotmat4worlda,
tabletopplacementrotmatworldaworldz = objrotmat4worldaworldz,
angle = float(tpsrows[:,1][i]), tabletopposition = dc.strToV3(tpsrows[:,3][i]))
globalidsedges[str(ttgsidfreeair)].append(armname+str(ttgsid))
localidedges.append(armname+str(ttgsid))
# print list(itertools.combinations(ttgrows[:,0], 2))
for edge in list(itertools.combinations(localidedges, 2)):
self.regg.add_edge(*edge, weight=1, edgetype = 'transit')
if len(globalidsedges) == 0:
raise ValueError("Plan tabletopgrips first!")
for globalidedgesid in globalidsedges.keys():
for edge in list(itertools.combinations(globalidsedges[globalidedgesid], 2)):
self.regg.add_edge(*edge, weight=1, edgetype = 'transfer')
# gen plot pos
# biggest circle: grips; big circle: rotation; small circle: placements
radiusplacement = 30
radiusrot = 6
radiusgrip = 1
xyplacementspos = {}
xydiscreterotspos = {}
xyzglobalgrippos = {}
for i, ttpsid in enumerate(self.fttpsids):
xydiscreterotspos[ttpsid] = {}
xyzglobalgrippos[ttpsid] = {}
xypos = [radiusplacement * math.cos(2 * math.pi / self.nfttps * i),
radiusplacement * math.sin(2 * math.pi / self.nfttps * i)]
xyplacementspos[ttpsid] = xypos
for j, anglevalue in enumerate(self.angles):
xyzglobalgrippos[ttpsid][anglevalue] = {}
xypos = [radiusrot * math.cos(math.radians(anglevalue)), radiusrot * math.sin(math.radians(anglevalue))]
xydiscreterotspos[ttpsid][anglevalue] = \
[xyplacementspos[ttpsid][0] + xypos[0], xyplacementspos[ttpsid][1] + xypos[1]]
for k, globalgripid in enumerate(self.globalgripids):
xypos = [radiusgrip * math.cos(2 * math.pi / len(self.globalgripids) * k),
radiusgrip * math.sin(2 * math.pi / len(self.globalgripids) * k)]
xyzglobalgrippos[ttpsid][anglevalue][globalgripid] = \
[xydiscreterotspos[ttpsid][anglevalue][0] + xypos[0],
xydiscreterotspos[ttpsid][anglevalue][1] + xypos[1], 0]
for nid in self.regg.nodes():
fttpid = self.regg.node[nid]['freetabletopplacementid']
anglevalue = self.regg.node[nid]['angle']
ggid = self.regg.node[nid]['globalgripid']
tabletopposition = self.regg.node[nid]['tabletopposition']
xyzpos = map(add, xyzglobalgrippos[fttpid][anglevalue][ggid],
[tabletopposition[0], tabletopposition[1], tabletopposition[2]])
self.gnodesplotpos[nid] = xyzpos[:2]
def plotGraph(self, pltax, offset = [0,0]):
"""
:param pltax:
:param offset: where to plot the graph
:return:
"""
# add offset
for i in self.gnodesplotpos.keys():
self.gnodesplotpos[i] = map(add, self.gnodesplotpos[i], offset)
transitedges = []
transferedges = []
for nid0, nid1, reggedgedata in self.regg.edges(data=True):
            if reggedgedata['edgetype'] == 'transit':
                transitedges.append([self.gnodesplotpos[nid0][:2], self.gnodesplotpos[nid1][:2]])
            if reggedgedata['edgetype'] == 'transfer':
transferedges.append([self.gnodesplotpos[nid0][:2], self.gnodesplotpos[nid1][:2]])
transitec = mc.LineCollection(transitedges, colors=[0,1,1,1], linewidths=1)
transferec = mc.LineCollection(transferedges, colors=[0,0,0,.1], linewidths=1)
pltax.add_collection(transferec)
pltax.add_collection(transitec)
class RegripTpp(object):
def __init__(self, objpath, nxtrobot, handpkg, gdb, armname):
self.graphtpp = GraphTpp(objpath, nxtrobot, handpkg, gdb, armname)
self.armname = armname
self.gdb = gdb
self.robot = nxtrobot
self.hand = handpkg.newHandNM(hndcolor=[1, 0, 0, .1])
# plane to remove hand
self.bulletworld = BulletWorld()
self.planebullnode = cd.genCollisionPlane(offset = -53)
self.bulletworld.attachRigidBody(self.planebullnode)
# add tabletop plane model to bulletworld
this_dir, this_filename = os.path.split(__file__)
# TODO: change the shape of nxt.egg
ttpath = Filename.fromOsSpecific(os.path.join(os.path.split(this_dir)[0]+os.sep, "grip", "supports", "tabletop_nxt.egg"))
self.ttnodepath = NodePath("tabletop")
ttl = loader.loadModel(ttpath)
ttl.instanceTo(self.ttnodepath)
self.endnodeids = []
# load retraction distances
self.rethandx, self.retworldz, self.retworlda, self.worldz = self.gdb.loadIKRet()
# worlda is default for the general grasps on table top
# for assembly at start and goal, worlda is computed by assembly planner
self.worlda = Vec3(0,0,1)
self.gnodesplotpos = {}
self.freegripid = []
self.freegripcontacts = []
self.freegripnormals = []
self.freegriprotmats = []
self.freegripjawwidth = []
# for start and goal grasps poses:
self.radiusgrip = 1
self.__xyzglobalgrippos_startgoal={}
for k, globalgripid in enumerate(self.graphtpp.globalgripids):
xypos = [self.radiusgrip * math.cos(2 * math.pi / len(self.graphtpp.globalgripids) * k),
self.radiusgrip * math.sin(2 * math.pi / len(self.graphtpp.globalgripids) * k)]
self.__xyzglobalgrippos_startgoal[globalgripid] = [xypos[0],xypos[1],0]
self.__loadFreeAirGrip()
@property
def dbobjname(self):
# read-only property
return self.graphtpp.dbobjname
def __loadFreeAirGrip(self):
"""
        load self.freegripid, etc. from the MySQL database
:param gdb: an object of the database.GraspDB class
:return:
author: weiwei
date: 20170110
"""
freeairgripdata = self.gdb.loadFreeAirGrip(self.dbobjname)
if freeairgripdata is None:
raise ValueError("Plan the freeairgrip first!")
self.freegripid = freeairgripdata[0]
self.freegripcontacts = freeairgripdata[1]
self.freegripnormals = freeairgripdata[2]
self.freegriprotmats = freeairgripdata[3]
self.freegripjawwidth = freeairgripdata[4]
def addEnd(self, rotmat4):
# the node id of a globalgripid in end
nodeidofglobalidinend = {}
# the endnodeids is also for quick access
self.endnodeids = []
for j, rotmat in enumerate(self.freegriprotmats):
grotmat = rotmat * rotmat4
# for collision detection, we move the object back to x=0,y=0
objrotmatx0y0 = Mat4(rotmat4)
objrotmatx0y0.setCell(3,0,0)
objrotmatx0y0.setCell(3,1,0)
grotmatx0y0 = rotmat * objrotmatx0y0
# check if the hand collide with tabletop
tmphand = self.hand
initmat = tmphand.getMat()
initjawwidth = tmphand.jawwidth
            # set jawwidth to the open width to avoid collision with surrounding obstacles;
            # setting it to the actual gripping width is unnecessary
# tmphand.setJawwidth(self.freegripjawwidth[j])
tmphand.setJawwidth(tmphand.jawwidthopen)
tmphand.setMat(grotmatx0y0)
# add hand model to bulletworld
hndbullnode = cd.genCollisionMeshMultiNp(tmphand.handnp)
result = self.bulletworld.contactTest(hndbullnode)
if not result.getNumContacts():
gcct0=rotmat4.xformPoint(self.freegripcontacts[j][0])
gcct1=rotmat4.xformPoint(self.freegripcontacts[j][1])
handx = grotmat.getRow3(0)
# panda3d format
gfgrcenter = (gcct0+gcct1)/2
gfgrcenterhandx = gfgrcenter + handx*self.rethandx
# handxworldz is not necessary for start
# gfgrcenterhandxworldz = gfgrcenterhandx + self.worldz*self.retworldz
gfgrcenterworlda = gfgrcenter + self.worlda*self.retworlda
gfgrcenterworldaworldz = gfgrcenterworlda+ self.worldz*self.retworldz
gjawwidth = self.freegripjawwidth[j]
gidfreeair = self.freegripid[j]
# numpy format
gfgrcenternp = pg.v3ToNp(gfgrcenter)
gfgrcenternp_handx = pg.v3ToNp(gfgrcenterhandx)
# handxworldz is not necessary for start
# gfgrcenternp_handxworldz = pg.v3ToNp(gfgrcenterhandxworldz)
gfgrcenternp_worlda = pg.v3ToNp(gfgrcenterworlda)
gfgrcenternp_worldaworldz = pg.v3ToNp(gfgrcenterworldaworldz)
grotmat3np = pg.mat3ToNp(grotmat.getUpper3())
ikc = self.robot.numikr(gfgrcenternp, grotmat3np, armid = self.armname)
ikcx = self.robot.numikr(gfgrcenternp_handx, grotmat3np, armid = self.armname)
ikca = self.robot.numikr(gfgrcenternp_worlda, grotmat3np, armid = self.armname)
ikcaz = self.robot.numikr(gfgrcenternp_worldaworldz, grotmat3np, armid = self.armname)
if (ikc is not None) and (ikcx is not None) and (ikca is not None) and (ikcaz is not None):
# note the tabletopposition here is not the contact for the intermediate states
# it is the zero pos
tabletopposition = rotmat4.getRow3(3)
rotmat4worlda = Mat4(rotmat4)
rotmat4worlda.setRow(3, rotmat4.getRow3(3)+self.worlda*self.retworlda)
rotmat4worldaworldz = Mat4(rotmat4worlda)
rotmat4worldaworldz.setRow(3, rotmat4worlda.getRow3(3)+self.worldz*self.retworldz)
self.graphtpp.regg.add_node('end'+self.armname+str(j), fgrcenter=gfgrcenternp,
fgrcenterhandx = gfgrcenternp_handx,
fgrcenterhandxworldz = 'na',
fgrcenterworlda = gfgrcenternp_worlda,
fgrcenterworldaworldz = gfgrcenternp_worldaworldz,
jawwidth=gjawwidth, hndrotmat3np=grotmat3np,
globalgripid = gidfreeair, freetabletopplacementid = 'na',
tabletopplacementrotmat = rotmat4,
tabletopplacementrotmathandx = rotmat4,
tabletopplacementrotmathandxworldz = 'na',
tabletopplacementrotmatworlda = rotmat4worlda,
tabletopplacementrotmatworldaworldz = rotmat4worldaworldz,
angle = 'na', tabletopposition = tabletopposition)
nodeidofglobalidinend[gidfreeair]='end'+self.armname+str(j)
self.endnodeids.append('end'+self.armname+str(j))
tmphand.setMat(initmat)
tmphand.setJawwidth(initjawwidth)
if len(self.endnodeids) == 0:
raise ValueError("No available end grip at " + self.armname)
        # add end transit edges
for edge in list(itertools.combinations(self.endnodeids, 2)):
self.graphtpp.regg.add_edge(*edge, weight = 1, edgetype = 'endtransit')
        # add end transfer edges
for reggnode, reggnodedata in self.graphtpp.regg.nodes(data=True):
if reggnode.startswith(self.armname):
globalgripid = reggnodedata['globalgripid']
if globalgripid in nodeidofglobalidinend.keys():
endnodeid = nodeidofglobalidinend[globalgripid]
self.graphtpp.regg.add_edge(endnodeid, reggnode, weight=1, edgetype = 'endtransfer')
for nid in self.graphtpp.regg.nodes():
if nid.startswith('end'):
ggid = self.graphtpp.regg.node[nid]['globalgripid']
tabletopposition = self.graphtpp.regg.node[nid]['tabletopposition']
xyzpos = map(add, self.__xyzglobalgrippos_startgoal[ggid],
[tabletopposition[0], tabletopposition[1], tabletopposition[2]])
self.gnodesplotpos[nid] = xyzpos[:2]
def deleteEnd(self):
for nid in list(self.graphtpp.regg.nodes()):
if nid.startswith('end'):
self.graphtpp.regg.remove_node(nid)
def plotGraph(self, pltax, gtppoffset = [0,0]):
"""
:param pltax:
:param endname:
:param gtppoffset: where to plot graphtpp, see the plotGraph function of GraphTpp class
:return:
"""
self.graphtpp.plotGraph(pltax, offset = gtppoffset)
self.__plotEnds(pltax)
def __plotEnds(self, pltax):
transitedges = []
transferedges = []
for nid0, nid1, reggedgedata in self.graphtpp.regg.edges(data=True):
if nid0.startswith('end'):
pos0 = self.gnodesplotpos[nid0][:2]
else:
pos0 = self.graphtpp.gnodesplotpos[nid0][:2]
if nid1.startswith('end'):
pos1 = self.gnodesplotpos[nid1][:2]
else:
pos1 = self.graphtpp.gnodesplotpos[nid1][:2]
if (reggedgedata['edgetype'] == 'endtransit'):
transitedges.append([pos0, pos1])
            elif (reggedgedata['edgetype'] == 'endtransfer'):
transferedges.append([pos0, pos1])
transitec = mc.LineCollection(transitedges, colors = [.5,0,0,.3], linewidths = 1)
transferec = mc.LineCollection(transferedges, colors = [1,0,0,.3], linewidths = 1)
pltax.add_collection(transferec)
pltax.add_collection(transitec)
class RegripTppAss(object):
def __init__(self, base, obj0path, obj0Mat4, obj1path, obj1Mat4, assDirect1to0, gdb, robot, handpkg):
"""
see parameters of assembly/asstwoobj
:param base:
:param obj0path:
:param obj0Mat4:
:param obj1path:
:param obj1Mat4:
:param assDirect1to0:
:param gdb:
:param robot:
:param handpkg:
author: weiwei
date: 20160308
"""
self.robot = robot
self.robothand = handpkg.newHandNM(hndcolor=[1, 0, 0, .1])
self.toass = Toass(base, obj0path, obj0Mat4, obj1path, obj1Mat4, assDirect1to0, gdb, handpkg)
self.toass.loadIKFeasibleAGPairsFromDB(robot)
self.regghalf = [RegripTpp(obj0path, robot, handpkg, gdb, 'rgt'), RegripTpp(obj1path, robot, handpkg, gdb, 'lft')]
self.objrotmat4s = [obj0Mat4, obj1Mat4]
self.retass = [assDirect1to0, -assDirect1to0]
self.regg = []
self.gnodesplotpos = {}
self.composedgnodesplotpos = {}
self.directshortestpaths = []
def findshortestpath(self, obj0SRotmat4, obj1SRotmat4):
self.__addEnds(obj0SRotmat4, obj1SRotmat4)
# startrgt goalrgt
if len(self.regghalf[0].endnodeids) > 0 and len(self.regghalf[1].endnodeids) > 0:
startgrip = self.regghalf[0].endnodeids[0]
goalgrip = self.regghalf[1].endnodeids[0]
shortestpaths = nx.all_shortest_paths(self.regg, source = startgrip, target = goalgrip)
self.directshortestpaths = []
print shortestpaths
for path in shortestpaths:
for i, pathnode in enumerate(path):
if pathnode.startswith('endrgt') and i < len(path)-1:
continue
else:
self.directshortestpaths.append(path[i-1:])
break
for i, pathnode in enumerate(self.directshortestpaths[-1]):
if i > 0 and pathnode.startswith('endlft'):
self.directshortestpaths[-1]=self.directshortestpaths[-1][:i+1]
break
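    # Typical call sequence (sketch): __addEnds() is already invoked inside
    # findshortestpath(), so a caller only needs something like
    #
    #     regass.findshortestpath(obj0SRotmat4, obj1SRotmat4)
    #     regass.plotGraph(pltax)
    #     regass.plotshortestpath(pltax, id=0)
    #
    # where regass is a RegripTppAss instance and pltax a matplotlib axes.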
def plotGraph(self, pltax, offset0 = [600, -800], offset1 = [600, 800]):
self.regghalf[0].plotGraph(pltax, offset0)
self.regghalf[1].plotGraph(pltax, offset1)
# # add offset
for nid in self.gnodesplotpos.keys():
if nid.startswith('assrgt'):
self.gnodesplotpos[nid] = map(add, self.gnodesplotpos[nid], [offset0[0], 0])
if nid.startswith('asslft'):
self.gnodesplotpos[nid] = map(add, self.gnodesplotpos[nid], [offset1[0], 0])
# make composed gnodesplotpos
self.composedgnodesplotpos = {}
for key in self.gnodesplotpos.keys():
self.composedgnodesplotpos[key] = self.gnodesplotpos[key]
for key in self.regghalf[0].gnodesplotpos.keys():
self.composedgnodesplotpos[key] = self.regghalf[0].gnodesplotpos[key]
for key in self.regghalf[0].graphtpp.gnodesplotpos.keys():
self.composedgnodesplotpos[key] = self.regghalf[0].graphtpp.gnodesplotpos[key]
for key in self.regghalf[1].gnodesplotpos.keys():
self.composedgnodesplotpos[key] = self.regghalf[1].gnodesplotpos[key]
for key in self.regghalf[1].graphtpp.gnodesplotpos.keys():
self.composedgnodesplotpos[key] = self.regghalf[1].graphtpp.gnodesplotpos[key]
transitedges = []
transferedges = []
for nid0, nid1, reggedgedata in self.regg.edges(data=True):
            if reggedgedata['edgetype'] == 'asstransit':
                transitedges.append([self.composedgnodesplotpos[nid0][:2], self.composedgnodesplotpos[nid1][:2]])
            if reggedgedata['edgetype'] == 'asstransfer':
transferedges.append([self.composedgnodesplotpos[nid0][:2], self.composedgnodesplotpos[nid1][:2]])
transitec = mc.LineCollection(transitedges, colors=[1,0,1,1], linewidths=1)
transferec = mc.LineCollection(transferedges, colors=[.5,.5,0,.03], linewidths=1)
pltax.add_collection(transferec)
pltax.add_collection(transitec)
def plotshortestpath(self, pltax, id=0):
"""
plot the shortest path
:param id:
:return:
"""
for i,path in enumerate(self.directshortestpaths):
            if i == id:
pathedges = []
pathlength = len(path)
for pnidx in range(pathlength-1):
nid0 = path[pnidx]
nid1 = path[pnidx+1]
pathedges.append([self.composedgnodesplotpos[nid0][:2], self.composedgnodesplotpos[nid1][:2]])
pathedgesec = mc.LineCollection(pathedges, colors=[0, 1, 0, 1], linewidths=5)
pltax.add_collection(pathedgesec)
def __addEnds(self, obj0SRotmat4, obj1SRotmat4):
"""
add the two ends to the graph
:param obj0SRotmat4:
:param obj1SRotmat4:
:return:
"""
self.regghalf[0].deleteEnd()
self.regghalf[1].deleteEnd()
self.regghalf[0].addEnd(obj0SRotmat4)
self.regghalf[1].addEnd(obj1SRotmat4)
self.regg = nx.compose(self.regghalf[0].graphtpp.regg, self.regghalf[1].graphtpp.regg)
self.__addAssNodes(armname = 'rgt')
self.__addAssNodes(armname = 'lft')
self.__bridgeGraph()
def __addAssNodes(self, armname = 'rgt'):
iele = 0
if armname == 'lft':
iele = 1
freeairidontpp = {}
# for plot
radiusgrip = self.regghalf[iele].radiusgrip
for nid in self.regghalf[iele].graphtpp.regg.nodes():
dictind = str(self.regghalf[iele].graphtpp.regg.node[nid]['globalgripid'])
if dictind in freeairidontpp:
freeairidontpp[dictind].append(nid)
else:
freeairidontpp[dictind] = []
# add floatingposes
freeairidonass = {}
for asspind, assprotmat4 in enumerate(self.toass.gridsfloatingposemat4s):
retass = assprotmat4.xformVec(self.retass[iele])
for pairind, hndrotmat4pair in enumerate(self.toass.icoassgrippairshndmat4s[asspind]):
assgid = self.toass.icoassgrippairsids[asspind][pairind][iele]
assgidfreeair = self.toass.icoassgrippairsidfreeairs[asspind][pairind][iele]
ccts = self.toass.icoassgrippairscontacts[asspind][pairind][iele]
hndrotmat4 = hndrotmat4pair[iele]
asspgfgrcenter = (Vec3(ccts[0][0], ccts[0][1], ccts[0][2]) + Vec3(ccts[1][0], ccts[1][1], ccts[1][2])) / 2
asspgfgrcenter_retass = asspgfgrcenter + retass
asspgfgrcenternp = pg.v3ToNp(asspgfgrcenter)
asspgfgrcenter_retassnp = pg.v3ToNp(asspgfgrcenter_retass)
jawwidth = self.toass.icoassgrippairsjawwidths[asspind][pairind][iele]
hndrotmat3np = pg.mat3ToNp(hndrotmat4.getUpper3())
objrotmat4 = self.objrotmat4s[iele]*assprotmat4
objrotmat4retass = Mat4(objrotmat4)
objrotmat4retass.setRow(3, objrotmat4retass.getRow3(3)+retass)
self.regg.add_node('ass' + armname + str(assgid), fgrcenter=asspgfgrcenternp,
fgrcenterretass=asspgfgrcenter_retassnp, jawwidth=jawwidth,
hndrotmat3np=hndrotmat3np, assposerotmat4=objrotmat4,
assposerotmat4retass=objrotmat4retass, assposeind=asspind,
icoassgrippairsid=pairind, globalgripid=assgidfreeair)
if str(assgidfreeair) in freeairidonass:
freeairidonass[str(assgidfreeair)].append('ass' + armname + str(assgid))
else:
freeairidonass[str(assgidfreeair)] = []
for freeairidontppkey in freeairidontpp.keys():
try:
for edge in list(itertools.product(freeairidontpp[freeairidontppkey], freeairidonass[freeairidontppkey])):
self.regg.add_edge(*edge, weight=1, edgetype='asstransfer')
            except KeyError:
                # this free-air grip id has no counterpart at the assembly pose
                pass
# plot pos
nfp = len(self.toass.gridsfloatingposemat4s)
xdist = 10
x = range(300,501,xdist)
y = range(-50,50,100*xdist/nfp)
for nid in self.regg.nodes():
if nid.startswith('ass'):
asspind = self.regg.node[nid]['assposeind']
assgind = self.regg.node[nid]['icoassgrippairsid']
nassg = len(self.toass.icoassgrippairshndmat4s[asspind])
xpos = x[asspind % len(x)]
ypos = y[asspind/len(x)]
xyzpos = [radiusgrip * math.cos(2 * math.pi / nassg * assgind)+xpos,
radiusgrip * math.sin(2 * math.pi / nassg * assgind)+ypos, 0]
self.gnodesplotpos[nid] = xyzpos[:2]
if nid.startswith('assrgt'):
self.gnodesplotpos[nid][1] = self.gnodesplotpos[nid][1] - 100
elif nid.startswith('asslft'):
self.gnodesplotpos[nid][1] = self.gnodesplotpos[nid][1] + 100
    def __bridgeGraph(self):
        """
        bridge the right and left halves: add an 'asstransit' edge between the
        two grasps of every grip pair at each floating assembly pose
        :return:
        """
for fpind, objrotmat4 in enumerate(self.toass.gridsfloatingposemat4s):
for pairind, hndrotmat4pair in enumerate(self.toass.icoassgrippairshndmat4s[fpind]):
fpgid0 = self.toass.icoassgrippairsids[fpind][pairind][0]
fpgid1 = self.toass.icoassgrippairsids[fpind][pairind][1]
self.regg.add_edge('assrgt'+str(fpgid0), 'asslft'+str(fpgid1), weight = 1, edgetype = 'asstransit')
if __name__=='__main__':
gdb = db.GraspDB()
nxtrobot = nextage.NxtRobot()
handpkg = rtq85nm
base = pandactrl.World(camp=[700,300,600], lookatp=[0,0,0])
this_dir, this_filename = os.path.split(__file__)
obj0path = os.path.join(os.path.split(this_dir)[0]+os.sep, "grip", "objects", "planefrontstay.stl")
obj0Mat4 = Mat4.identMat()
obj1path = os.path.join(os.path.split(this_dir)[0]+os.sep, "grip", "objects", "planewheel.stl")
obj1Mat4 = Mat4(obj0Mat4)
obj1Mat4.setCell(3,1,32)
obj1Mat4.setCell(3,2,10)
assDirect1to0 = Vec3(0, -70, 0)
obj0trimesh = trimesh.load_mesh(obj0path)
obj0np = pg.packpandanp(obj0trimesh.vertices, obj0trimesh.face_normals, obj0trimesh.faces)
obj0np.setMat(obj0Mat4)
obj0np.setColor(.7,.3,0)
obj1trimesh = trimesh.load_mesh(obj1path)
obj1np = pg.packpandanp(obj1trimesh.vertices, obj1trimesh.face_normals, obj1trimesh.faces)
obj1np.setMat(obj1Mat4)
obj1np.setColor(0,.3,0.7)
sprotmat4 = Mat4(1.0,0.0,0.0,0.0,\
0.0,0.0,1.0,0.0,\
0.0,-1.0,0.0,0.0,\
350,-400,15.0,1.0)
whrotmat4 = Mat4(1.0,0.0,0.0,0.0,\
0.0,0.0,1.0,0.0,\
0.0,-1.0,0.0,0.0,\
350,400,0.0,1.0)
regass = RegripTppAss(base, obj0path, obj0Mat4, obj1path, obj1Mat4, assDirect1to0, gdb, nxtrobot, handpkg)
regass.findshortestpath(obj0SRotmat4 = sprotmat4, obj1SRotmat4 = whrotmat4)
pltfig = plt.figure()
ax = pltfig.add_subplot(111)
regass.plotGraph(ax, offset0 = [600,-800], offset1 = [600, 800])
print regass.directshortestpaths
regass.plotshortestpath(ax)
plt.axis("equal")
plt.show()
# obj1np.setMat(whrotmat4)
# obj1np.reparentTo(base.render)
# pg.plotAxis(base.render)
base.run() | [
"[email protected]"
] | |
16f59fda29cf8113314e0316016f0a0ce62a268b | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4042/149004042.py | a1fcdf274ced8b3da938a656143708a70d73b544 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,788 | py | from bots.botsconfig import *
from records004042 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'NT',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 99999},
{ID: 'TDS', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 99999},
{ID: 'N1', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'IN2', MIN: 0, MAX: 10},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 2},
]},
{ID: 'TFS', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'TIA', MIN: 0, MAX: 99999},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'IN2', MIN: 0, MAX: 10},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
{ID: 'FGS', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'TIA', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 99999},
{ID: 'PCT', MIN: 0, MAX: 99999},
{ID: 'AMT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 99999},
]},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'IN2', MIN: 0, MAX: 10},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
8b15c70ba21396494d75e7deb3f8b022da626582 | 1403d8670aa2e3ad6b72524688ca6318a84cd1f2 | /Chapter 12/nest_egg_mcs_1st_5yrs.py | a61e2eff6343367a5e2e3d49f1a142488cc3f307 | [] | no_license | afettouhi/ImpracticalPythonProjects-py38 | 033037a9b9b4af1401b968fa6f51fa0adde8be3f | 074b9f8b77d72cac2bb33f57a918c509a4f0ef17 | refs/heads/master | 2023-01-07T08:05:42.849002 | 2020-11-02T06:12:58 | 2020-11-02T06:12:58 | 304,799,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,641 | py | """
Retirement nest egg calculator using Monte Carlo simulation.
"""
import sys
import random
import matplotlib.pyplot as plt
def read_to_list(file_name):
"""
Open a file of data in percent, convert to decimal & return a list.
"""
with open(file_name) as in_file:
lines = [float(line.strip()) for line in in_file]
decimal = [round(line / 100, 5) for line in lines]
return decimal
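# e.g. (illustrative): a file with the lines "10.5" and "-3.25" yields
# [0.105, -0.0325] -- percent values converted to decimals.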
def default_input(prompt, default=None):
"""
Allow use of default values in input
"""
prompt = '{} [{}]: '.format(prompt, default)
response = input(prompt)
if not response and default:
return default
else:
return response
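# e.g. (illustrative): default_input("Input number of cases to run: \n", '50000')
# returns '50000' if the user just presses ENTER.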
# load data files with original data in percent form
print("\nNote: Input data should be in percent, not decimal!\n")
try:
bonds = read_to_list('../data/10-yr_TBond_returns_1926-2013_pct.txt')
stocks = read_to_list('../data/SP500_returns_1926-2013_pct.txt')
blend_40_50_10 = read_to_list('../data/S-B-C_blend_1926-2013_pct.txt')
blend_50_50 = read_to_list('../data/S-B_blend_1926-2013_pct.txt')
infl_rate = read_to_list('../data/annual_infl_rate_1926-2013_pct.txt')
except IOError as e:
print("{}. \nTerminating program.".format(e), file=sys.stderr)
sys.exit(1)
# get user input; use dictionary for investment-type arguments
investment_type_args = {'bonds': bonds, 'stocks': stocks,
'sb_blend': blend_50_50, 'sbc_blend': blend_40_50_10}
# print input legend for user
print(" stocks = SP500")
print(" bonds = 10-yr Treasury Bond")
print(" sb_blend = 50% SP500/50% TBond")
print("sbc_blend = 40% SP500/50% TBond/10% Cash\n")
print("Press ENTER to take default value shown in [brackets]. \n")
# get user input
invest_type = default_input("Enter investment type: (stocks, bonds, sb_blend," \
" sbc_blend): \n", 'bonds').lower()
while invest_type not in investment_type_args:
invest_type = input("Invalid investment. Enter investment type " \
"as listed in prompt: ")
start_value = default_input("Input starting value of investments: \n", \
'2000000')
while not start_value.isdigit():
start_value = input("Invalid input! Input integer only: ")
withdrawal_1 = default_input("Input annual pre-tax withdrawal for " \
"first 5 yrs(today's $): \n", '100000')
while not withdrawal_1.isdigit():
withdrawal_1 = input("Invalid input! Input integer only: ")
withdrawal_2 = default_input("Input annual pre-tax withdrawal for " \
"remainder (today's $): \n", '80000')
while not withdrawal_2.isdigit():
withdrawal_2 = input("Invalid input! Input integer only: ")
min_years = default_input("Input minimum years in retirement: \n", '18')
while not min_years.isdigit():
min_years = input("Invalid input! Input integer only: ")
most_likely_years = default_input("Input most-likely years in retirement: \n",
'25')
while not most_likely_years.isdigit():
most_likely_years = input("Invalid input! Input integer only: ")
max_years = default_input("Input maximum years in retirement: \n", '40')
while not max_years.isdigit():
max_years = input("Invalid input! Input integer only: ")
num_cases = default_input("Input number of cases to run: \n", '50000')
while not num_cases.isdigit():
num_cases = input("Invalid input! Input integer only: ")
# check for other erroneous input
if not int(min_years) < int(most_likely_years) < int(max_years) \
or int(max_years) > 99:
print("\nProblem with input years.", file=sys.stderr)
print("Requires Min < ML < Max & Max <= 99.", file=sys.stderr)
sys.exit(1)
def montecarlo(returns):
"""
    Run MCS & return investment value at death & # of times bankrupt.
"""
case_count = 0
bankrupt_count = 0
outcome = []
while case_count < int(num_cases):
investments = int(start_value)
start_year = random.randrange(0, len(returns))
duration = int(random.triangular(int(min_years), int(max_years),
int(most_likely_years)))
end_year = start_year + duration
lifespan = [i for i in range(start_year, end_year)]
bankrupt = 'no'
# build temporary lists for each case
lifespan_returns = []
lifespan_infl = []
for i in lifespan:
lifespan_returns.append(returns[i % len(returns)])
lifespan_infl.append(infl_rate[i % len(infl_rate)])
# loop through each year of retirement for each case run
for index, i in enumerate(lifespan_returns):
infl = lifespan_infl[index]
# don't adjust for inflation the first year
if index == 0:
withdraw_infl_adj_1 = int(withdrawal_1)
withdraw_infl_adj_2 = int(withdrawal_2)
else:
withdraw_infl_adj_1 = int(withdraw_infl_adj_1 * (1 + infl))
withdraw_infl_adj_2 = int(withdraw_infl_adj_2 * (1 + infl))
if index < 5:
withdraw_infl_adj = withdraw_infl_adj_1
else:
withdraw_infl_adj = withdraw_infl_adj_2
investments -= withdraw_infl_adj
investments = int(investments * (1 + i))
if investments <= 0:
bankrupt = 'yes'
break
if bankrupt == 'yes':
outcome.append(0)
bankrupt_count += 1
else:
outcome.append(investments)
case_count += 1
return outcome, bankrupt_count
def bankrupt_prob(outcome, bankrupt_count):
"""
Calculate & return chance of running out of money & print statistics.
"""
total = len(outcome)
odds = round(100 * bankrupt_count / total, 1)
print("\nInvestment type: {}".format(invest_type))
print("Starting value: ${:,}".format(int(start_value)))
print("Annual withdrawal first 5 yrs: ${:,}".format(int(withdrawal_1)))
print("Annual withdrawal after 5 yrs: ${:,}".format(int(withdrawal_2)))
print("Years in retirement (min-ml-max): {}-{}-{}"
.format(min_years, most_likely_years, max_years))
print("Number of runs: {:,}\n".format(len(outcome)))
print("Odds of running out of money: {}%\n".format(odds))
print("Average outcome: ${:,}".format(int(sum(outcome) / total)))
print("Minimum outcome: ${:,}".format(min(i for i in outcome)))
print("Maximum outcome: ${:,}".format(max(i for i in outcome)))
return odds
def main():
"""
Run the program and draw bar chart of results.
"""
outcome, bankrupt_count = montecarlo(investment_type_args[invest_type])
odds = bankrupt_prob(outcome, bankrupt_count)
# generate matplotlib bar chart
plotdata = outcome[:3000] # only plot first 3000 runs
plt.figure('Outcome by Case (showing first {} runs)'.format(len(plotdata)),
figsize=(16, 5)) # size is width, height in inches
index = [i + 1 for i in range(len(plotdata))]
plt.bar(index, plotdata, color='black')
plt.xlabel('Simulated Lives', fontsize=18)
plt.ylabel('$ Remaining', fontsize=18)
plt.ticklabel_format(style='plain', axis='y')
ax = plt.gca()
ax.get_yaxis().set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}"
.format(int(x))))
plt.title('Probability of running out of money = {}%'.format(odds),
fontsize=20, color='red')
plt.show()
# run program
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
25f0f93e00812237893b74b780d979cce30a1d81 | 5a1eea357eb4fc0c5a1d1478e69ff7779b8a686a | /test/view_helpers/test_Maps_Views.py | 34f1185e2d51515ee91331a2041ac2d85e7a2bc9 | [
"Apache-2.0"
] | permissive | dariovillalon/OSBot-browser | 77059951bb0d2bc6bae13559e17fc84f5268d837 | bcb7e9f4b8c6980afb32d9ba1299c20fc744bf9d | refs/heads/master | 2021-05-18T20:05:01.597025 | 2020-03-29T23:17:27 | 2020-03-29T23:17:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | from unittest import TestCase
from osbot_aws.apis.Lambda import Lambda
from pbx_gs_python_utils.utils.Dev import Dev
from osbot_browser.browser.Browser_Lamdba_Helper import Browser_Lamdba_Helper
from osbot_browser.view_helpers.Maps_Views import Maps_Views
from gw_bot.Deploy import Deploy
class Test_Maps_Views(TestCase):
def setUp(self):
Deploy().setup() # set local ossbot environment
self.maps_views = Maps_Views()
self.png_data = None
self.result = None
def tearDown(self):
if self.result:
Dev.pprint(self.result)
if self.png_data:
Browser_Lamdba_Helper().save_png_data(self.png_data)
def test_default(self):
self.result = self.maps_views.default(headless=False)#,channel='DJ8UA0RFT')
def test_exec_js(self):
channel = 'DJ8UA0RFT'
channel = None
params = ["maps.add_component('aaa 123' , 2, 1)"]
self.result = self.maps_views.exec_js(headless=False ,channel=channel, params=params)
def test_via_lambda_execution(self):
self.test_update_lambda()
view = 'default'
code = ''
aws_lambda = Lambda('osbot_browser.lambdas.lambda_browser')
payload = {"params": ["maps", view, code],
'data': { 'channel' : 'DJ8UA0RFT'}}
self.result = aws_lambda.invoke(payload)
def test_via_lambda_execution__version(self):
self.test_update_lambda()
aws_lambda = Lambda('osbot_browser.lambdas.lambda_browser')
payload = {"params": ["maps", "version"],'data': {}}
self.result = aws_lambda.invoke(payload)
def test_update_lambda_browser(self):
Deploy().setup().deploy_lambda__browser()
def test_update_lambda_oss_bot(self):
Deploy().setup().deploy_lambda__gw_bot() | [
"[email protected]"
] | |
e6f1611a6eb8c993d49fa2bcc2526dd8bfc0c6dc | 6d4e31ef0e828db01775dbb22a43b11680f86059 | /mysql_proto/plugins/plugin.py | ba82307a5223e7938bcaaa46ecdaa8922781fd69 | [
"MIT"
] | permissive | alvinzane/PyMP | a81a1e07a962604f96d5c68ef89cbf9947cbbccf | cfe426dbca4afae95714d6026903d2678a46ddc4 | refs/heads/master | 2020-04-07T20:12:49.004874 | 2019-01-05T15:18:43 | 2019-01-05T15:18:43 | 158,679,588 | 0 | 0 | null | 2018-11-22T10:08:29 | 2018-11-22T10:08:29 | null | UTF-8 | Python | false | false | 996 | py | # coding=utf-8
class Plugin(object):
def init(self, context):
raise NotImplementedError()
def read_handshake(self, context):
raise NotImplementedError()
def send_handshake(self, context):
raise NotImplementedError()
def read_auth(self, context):
raise NotImplementedError()
def send_auth(self, context):
raise NotImplementedError()
def read_auth_result(self, context):
raise NotImplementedError()
def send_auth_result(self, context):
raise NotImplementedError()
def read_query(self, context):
raise NotImplementedError()
def send_query(self, context):
raise NotImplementedError()
def read_query_result(self, context):
raise NotImplementedError()
def send_query_result(self, context):
raise NotImplementedError()
def cleanup(self, context):
raise NotImplementedError()
def shutdown(self, context):
raise NotImplementedError()
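

# A minimal subclass sketch (hypothetical, for illustration only; it assumes
# the proxy only invokes the hooks a given plugin actually overrides):
class LoggingPlugin(Plugin):
    def init(self, context):
        # called once when the proxied session starts
        print("mysql proxy session starting")

    def read_query(self, context):
        # inspect (or rewrite) the client query carried by `context` here
        pass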
| [
"[email protected]"
] | |
594a901265ccc41397cbed360de2d2e85a9e531b | 91da8a59561d6f2c7852c0548298434e0ede2ac7 | /Tree/Construct_Binary_Tree_from_Preorder_and_Postorder_Traversal.py | 2c68547cd8733c752bd84ee60f23b160ede1ed3f | [] | no_license | prashant97sikarwar/leetcode | 6d3828772cc426ccf53dad07edb1efbc2f1e1ded | e76054e27a5d4493bd1bcef2ebdeb21d257afb63 | refs/heads/master | 2023-08-23T05:06:23.181869 | 2021-10-28T18:19:10 | 2021-10-28T18:19:10 | 286,057,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | #Problem Link :- https://leetcode.com/problems/construct-binary-tree-from-preorder-and-postorder-traversal/
"""Return any binary tree that matches the given preorder and postorder traversals.
Values in the traversals pre and post are distinct positive integers."""
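# Worked example (sketch): pre = [1,2,4,5,3,6,7] and post = [4,5,2,6,7,3,1]
# yield a tree rooted at 1 with left subtree 2 -> (4, 5) and right subtree
# 3 -> (6, 7); pre[1] locates the left-subtree root inside post.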
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
    def constructFromPrePost(self, pre, post):
if not pre:
return
root = TreeNode(pre[0])
if len(pre) == 1:
return root
        L = post.index(pre[1]) + 1  # size of the left subtree (pre[1] is its root)
root.left = self.constructFromPrePost(pre[1:L+1],post[:L])
root.right = self.constructFromPrePost(pre[L+1:],post[L:-1])
return root | [
"[email protected]"
] | |
8ede1ff4e6d2f58b5266da7f5c33d2592bcb46ca | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp-obt/sblp_ut=3.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=29/params.py | 8755b15b27c0b917e2860a10c7ce53504eaaa02b | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.547667',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.8',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 29,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
391dd31b81e74cd0e0f234e71ed4001eaee94ad6 | 0478abafc05f1dd55ddf6054d95fef73e9fa03e9 | /quati/features.py | 30821e305cafde7bfdb2a894b5c9975c70a85fb5 | [
"MIT"
] | permissive | deep-spin/quati | 89bce0868b36b0d7902659507b72acfbd01ada98 | 62a6769475090182fe2990b2864d66f8e2081a32 | refs/heads/master | 2023-03-12T09:22:31.520259 | 2021-03-02T15:13:22 | 2021-03-02T15:13:22 | 330,678,540 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,683 | py | """Useful for sequence tagging tasks like POS or NER."""
from quati import opts
from quati.dataset.fields.affixes import AffixesField
from quati.dataset.fields.caps import CapsField
def build(options):
prefixes_field = AffixesField()
suffixes_field = AffixesField()
caps_field = CapsField()
fields_tuples = []
if options.use_prefixes:
fields_tuples.append(('prefixes', prefixes_field))
if options.use_suffixes:
fields_tuples.append(('suffixes', suffixes_field))
if options.use_caps:
fields_tuples.append(('caps', caps_field))
return fields_tuples
def load(path):
options = opts.load(path)
return build(options)
class Caps:
all_upper = 'UPPER' # acronyms
all_lower = 'LOWER' # normal words
first_upper = 'FIRST' # names, titles
non_alpha = 'NON_ALPHA' # dates, hours, punctuations
other = 'OTHER' # any other
def extract_prefixes(words, min_length, max_length):
return extract_affixes(words, min_length, max_length, affix_type='prefix')
def extract_suffixes(words, min_length, max_length):
return extract_affixes(words, min_length, max_length, affix_type='suffix')
def extract_affixes(words, min_length, max_length, affix_type='prefix'):
total_length = max_length - min_length + 1
pad_token = '<pad-{}>'.format(affix_type)
def fill_with_pad(v):
for _ in range(total_length - len(v)):
v.append(pad_token)
new_words = []
for sentence in words:
tokens = sentence.split()
affixes_tokens = []
for token in tokens:
affixes = []
if len(token) >= min_length:
i, j = min_length, min(max_length, len(token))
for k in range(i, j + 1):
affix = token[:k] if affix_type == 'prefix' else token[-k:]
affixes.append(affix)
fill_with_pad(affixes)
affixes_tokens.extend(affixes)
new_words.append(' '.join(affixes_tokens))
return new_words
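# e.g. (illustrative): extract_affixes(['hello'], 3, 4, affix_type='prefix')
# returns ['hel hell']; with affix_type='suffix' it returns ['llo ello'].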
def extract_caps(words):
new_words = []
for sentence in words:
tokens = sentence.split()
caps_tokens = []
for token in tokens:
if not token.isalpha():
caps_tokens.append(Caps.non_alpha)
elif token.isupper():
caps_tokens.append(Caps.all_upper)
elif token.islower():
caps_tokens.append(Caps.all_lower)
elif token[0].isupper() and token[1:].islower():
caps_tokens.append(Caps.first_upper)
else:
caps_tokens.append(Caps.other)
new_words.append(caps_tokens)
return new_words
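# e.g. (illustrative): extract_caps(['NASA sent 3 Rovers']) returns
# [['UPPER', 'LOWER', 'NON_ALPHA', 'FIRST']].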
| [
"[email protected]"
] | |
80d6da4ad8540d5da3fd49cc9e8e50e91ea0b833 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03673/s956353086.py | bb1c4270936c6d0fa8f2322285846a5e4b4a2c72 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | n = int(input())
a = list(map(int,input().rstrip().split(" ")))
b1 = []
b2 = []
ans = []
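# split a by index parity; the half placed in front of the answer is reversed
# (which half that is depends on the parity of n)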
if n % 2 == 0:
for i in range(n):
if i % 2 == 0:
b1.append(a[i])
else:
b2.append(a[i])
b2.reverse()
ans = b2 + b1
print(" ".join(str(i) for i in ans))
else:
for i in range(n):
if i % 2 == 0:
b1.append(a[i])
else:
b2.append(a[i])
b1.reverse()
ans = b1 + b2
print(" ".join(str(i) for i in ans)) | [
"[email protected]"
] | |
7a98373803beb4dd79354dd8d95b1d9369c097d6 | 9eec6ca0e14c50298d0ecaa6e2f75b8a7c0f76c4 | /resolwe/flow/migrations/0015_refactor_relations_1.py | 96ddbc7acd5f125e651bdcc17c9c5d09d0c47f30 | [
"Apache-2.0"
] | permissive | mzagmajster/resolwe | 81e65fca94bd14c59b6da718e2f6c4c0b41481b1 | da371a3ec0260a45ccab848704c6a339a0de79cc | refs/heads/master | 2022-02-20T05:23:41.953200 | 2019-08-27T16:36:25 | 2019-08-27T16:36:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-06 02:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('flow', '0014_track_resources'),
]
operations = [
migrations.RenameModel(
old_name='PositionInRelation',
new_name='RelationPartition',
),
migrations.RenameField(
model_name='relation',
old_name='label',
new_name='category',
),
migrations.AlterField(
model_name='relation',
name='category',
field=models.CharField(default='', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='relation',
name='unit',
field=models.CharField(blank=True, choices=[('s', 'Second'), ('min', 'Minute'), ('hr', 'Hour'), ('d', 'Day'), ('wk', 'Week')], max_length=3, null=True),
),
migrations.AlterField(
model_name='relation',
name='collection',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='flow.Collection'),
),
migrations.AlterUniqueTogether(
name='relation',
unique_together=set([('collection', 'category')]),
),
migrations.AddField(
model_name='relationpartition',
name='label',
field=models.CharField(blank=True, db_index=True, max_length=30, null=True),
),
migrations.AlterUniqueTogether(
name='relationpartition',
unique_together=set([('entity', 'relation')]),
),
]
| [
"[email protected]"
] | |
d12a24546543e5d538bcf9f62e9a5d56191d6267 | 661ed0d6372d11d81e8fe11e638956325c289648 | /src/ui/__init__.py | 64faf0e51e734171e52f45a9476bf9b23f12ca5a | [
"Apache-2.0"
] | permissive | fanqyyy/RT-Flash | 7b9dd7ceefed1296ec2602b08a8f8c6349524d05 | 5bf1549ef3cea76e96b554b2e519436af0084f36 | refs/heads/master | 2020-07-27T19:38:03.322046 | 2019-09-18T12:25:39 | 2019-09-18T12:25:39 | 209,198,017 | 0 | 0 | Apache-2.0 | 2019-09-18T02:19:06 | 2019-09-18T02:19:06 | null | UTF-8 | Python | false | false | 82 | py | #!/usr/bin/env python
import uicore
import uidef
__all__ = ["uicore", "uidef"]
| [
"[email protected]"
] | |
e9e391dcd37dfd9a072ffaaf80a20be09106b74f | 8578ae5be776b49559fa95ce30f6b45b6a82b73a | /test/functional/feature_filelock.py | c25d6215ebf2c07361b684e43b155e7f75109040 | [
"MIT"
] | permissive | devcoin/core | 3f9f177bd9d5d2cc54ff95a981cfe88671206ae2 | f67e8b058b4316dd491615dc3f8799a45f396f4a | refs/heads/master | 2023-05-25T03:42:03.998451 | 2023-05-24T07:59:22 | 2023-05-24T08:02:14 | 21,529,485 | 16 | 13 | MIT | 2022-01-07T17:04:18 | 2014-07-05T22:42:13 | C | UTF-8 | Python | false | false | 2,539 | py | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core and Devcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check that it's not possible to start a second devcoind instance using the same datadir or wallet."""
import os
import random
import string
from test_framework.test_framework import DevcoinTestFramework
from test_framework.test_node import ErrorMatch
class FilelockTest(DevcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=None)
self.nodes[0].start()
self.nodes[0].wait_for_rpc_connection()
def run_test(self):
datadir = os.path.join(self.nodes[0].datadir, self.chain)
self.log.info("Using datadir {}".format(datadir))
self.log.info("Check that we can't start a second devcoind instance using the same datadir")
expected_msg = "Error: Cannot obtain a lock on data directory {0}. {1} is probably already running.".format(datadir, self.config['environment']['PACKAGE_NAME'])
self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg)
if self.is_wallet_compiled():
def check_wallet_filelock(descriptors):
wallet_name = ''.join([random.choice(string.ascii_lowercase) for _ in range(6)])
self.nodes[0].createwallet(wallet_name=wallet_name, descriptors=descriptors)
wallet_dir = os.path.join(datadir, 'wallets')
self.log.info("Check that we can't start a second devcoind instance using the same wallet")
if descriptors:
expected_msg = "Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another devcoind?"
else:
expected_msg = "Error: Error initializing wallet database environment"
self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-wallet=' + wallet_name, '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
if self.is_bdb_compiled():
check_wallet_filelock(False)
if self.is_sqlite_compiled():
check_wallet_filelock(True)
if __name__ == '__main__':
FilelockTest().main()
| [
"[email protected]"
] | |
62a27c6887146dc8333e488d19690b93747bab0d | 3697d04e8daa01e880f8078bc38426eb23389b90 | /oireachtas_api/models/inline_response2004_member_membership.py | f4e7ce6a8ec47a0c9e42fe2f565bb8c2449df1e1 | [] | no_license | Irishsmurf/OireachtasAPI | 3e4ed3b6a1a0fd815cc929f16af0b3ef39d76e13 | 979d354d39cc2957c4009c62ef205215ae8ba123 | refs/heads/master | 2023-08-02T14:56:29.951977 | 2020-04-13T18:33:56 | 2020-04-13T18:33:56 | 255,411,309 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,188 | py | # coding: utf-8
"""
Houses of the Oireachtas Open Data APIs
The Houses of the Oireachtas is providing these APIs to allow our datasets to be retrieved and reused as widely as possible. They are intended to be used in conjunction with https://data.oireachtas.ie, from where our datasets can be accessed directly. By using the APIs, users can make metadata queries to identify the specific data they require. New data are available through the API as soon as they are published. Currently, https://data.oireachtas.ie contains data in XML format from the Official Report of the Houses of the Oireachtas (the \"debates\") and replies to Parliamentary Questions in XML files complying with the [Akoma Ntoso](http://akomantoso.org) schema, as well data in PDF format for Bills, Acts and other documents published by the Houses of the Oireachtas. Files can be retrieved from https://data.oireachtas.ie by adding the URI fragment contained in the \"formats\" fields of the JSON documents returned by these APIs. At the moment only PDF and XML files are available directly from https://data.oireachtas.ie, but this will become the endpoint for direct access of all \"uri\" fields in the data queried through https://api.oireachtas.ie. We will also be making bulk downloads available through https://data.oireachtas.ie. Please note the APIs are a work in progress. We are working on expanding the range of datasets we publish, and we are interested in hearing about how to make these APIs more useful and wide ranging. For these reasons, we welcome any feedback, suggestions and user stories to [email protected] Data published through these APIs are made available under the [Oireachtas (Open Data) PSI Licence](https://beta.oireachtas.ie/en/open-data/license/) # noqa: E501
OpenAPI spec version: 1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse2004MemberMembership(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'parties': 'list[InlineResponse2004MemberMembershipParties]',
'house': 'InlineResponse2004MemberMembershipHouse',
'offices': 'list[InlineResponse2004MemberMembershipOffices]',
'uri': 'str',
'represents': 'list[InlineResponse2004MemberMembershipRepresents]',
'date_range': 'InlineResponse2004MemberMembershipPartyDateRange'
}
attribute_map = {
'parties': 'parties',
'house': 'house',
'offices': 'offices',
'uri': 'uri',
'represents': 'represents',
'date_range': 'dateRange'
}
def __init__(self, parties=None, house=None, offices=None, uri=None, represents=None, date_range=None): # noqa: E501
"""InlineResponse2004MemberMembership - a model defined in Swagger""" # noqa: E501
self._parties = None
self._house = None
self._offices = None
self._uri = None
self._represents = None
self._date_range = None
self.discriminator = None
if parties is not None:
self.parties = parties
if house is not None:
self.house = house
if offices is not None:
self.offices = offices
if uri is not None:
self.uri = uri
if represents is not None:
self.represents = represents
if date_range is not None:
self.date_range = date_range
@property
def parties(self):
"""Gets the parties of this InlineResponse2004MemberMembership. # noqa: E501
:return: The parties of this InlineResponse2004MemberMembership. # noqa: E501
:rtype: list[InlineResponse2004MemberMembershipParties]
"""
return self._parties
@parties.setter
def parties(self, parties):
"""Sets the parties of this InlineResponse2004MemberMembership.
:param parties: The parties of this InlineResponse2004MemberMembership. # noqa: E501
:type: list[InlineResponse2004MemberMembershipParties]
"""
self._parties = parties
@property
def house(self):
"""Gets the house of this InlineResponse2004MemberMembership. # noqa: E501
:return: The house of this InlineResponse2004MemberMembership. # noqa: E501
:rtype: InlineResponse2004MemberMembershipHouse
"""
return self._house
@house.setter
def house(self, house):
"""Sets the house of this InlineResponse2004MemberMembership.
:param house: The house of this InlineResponse2004MemberMembership. # noqa: E501
:type: InlineResponse2004MemberMembershipHouse
"""
self._house = house
@property
def offices(self):
"""Gets the offices of this InlineResponse2004MemberMembership. # noqa: E501
:return: The offices of this InlineResponse2004MemberMembership. # noqa: E501
:rtype: list[InlineResponse2004MemberMembershipOffices]
"""
return self._offices
@offices.setter
def offices(self, offices):
"""Sets the offices of this InlineResponse2004MemberMembership.
:param offices: The offices of this InlineResponse2004MemberMembership. # noqa: E501
:type: list[InlineResponse2004MemberMembershipOffices]
"""
self._offices = offices
@property
def uri(self):
"""Gets the uri of this InlineResponse2004MemberMembership. # noqa: E501
:return: The uri of this InlineResponse2004MemberMembership. # noqa: E501
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""Sets the uri of this InlineResponse2004MemberMembership.
:param uri: The uri of this InlineResponse2004MemberMembership. # noqa: E501
:type: str
"""
self._uri = uri
@property
def represents(self):
"""Gets the represents of this InlineResponse2004MemberMembership. # noqa: E501
:return: The represents of this InlineResponse2004MemberMembership. # noqa: E501
:rtype: list[InlineResponse2004MemberMembershipRepresents]
"""
return self._represents
@represents.setter
def represents(self, represents):
"""Sets the represents of this InlineResponse2004MemberMembership.
:param represents: The represents of this InlineResponse2004MemberMembership. # noqa: E501
:type: list[InlineResponse2004MemberMembershipRepresents]
"""
self._represents = represents
@property
def date_range(self):
"""Gets the date_range of this InlineResponse2004MemberMembership. # noqa: E501
:return: The date_range of this InlineResponse2004MemberMembership. # noqa: E501
:rtype: InlineResponse2004MemberMembershipPartyDateRange
"""
return self._date_range
@date_range.setter
def date_range(self, date_range):
"""Sets the date_range of this InlineResponse2004MemberMembership.
:param date_range: The date_range of this InlineResponse2004MemberMembership. # noqa: E501
:type: InlineResponse2004MemberMembershipPartyDateRange
"""
self._date_range = date_range
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse2004MemberMembership, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse2004MemberMembership):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
e8b4f296e257361089611d6c61f6e971a91144f8 | 4feaf520374804d6f3feebe3700fb448692a44ba | /pullenti/ner/booklink/internal/EpNerBooklinkInternalResourceHelper.py | 903e0b5ab200e9414e8adbf15e7a5c0b1f55b3a4 | [] | no_license | MihaJjDa/APCLtask | f7be3fb6b0f31801196bf779f6a7e62ce245493b | 4745b45e199887d433ab256bb2e2ebf5dbe3f7cd | refs/heads/master | 2020-04-16T17:15:10.846647 | 2020-02-24T16:06:43 | 2020-02-24T16:06:43 | 165,769,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | # Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
import io
from pullenti.unisharp.Utils import Utils
class EpNerBooklinkInternalResourceHelper:
""" Это для поддержки получения встроенных ресурсов """
@staticmethod
def getBytes(name : str) -> bytearray:
""" Получить встроенный ресурс
Args:
name(str): имя, на который оканчивается ресурс
"""
# ignored: assembly = EpNerBooklinkInternalResourceHelper.
names = Utils.getResourcesNames('pullenti.ner.booklink.properties', '.png')
for n in names:
if (Utils.endsWithString(n, name, True)):
try:
inf = Utils.getResourceInfo('pullenti.ner.booklink.properties', n)
if (inf is None):
continue
with Utils.getResourceStream('pullenti.ner.booklink.properties', n) as stream:
buf = Utils.newArrayOfBytes(Utils.getLengthIO(stream), 0)
Utils.readIO(stream, buf, 0, len(buf))
return buf
except Exception as ex:
pass
return None
@staticmethod
def getString(name : str) -> str:
arr = EpNerBooklinkInternalResourceHelper.getBytes(name)
if (arr is None):
return None
if ((len(arr) > 3 and arr[0] == (0xEF) and arr[1] == (0xBB)) and arr[2] == (0xBF)):
return arr[3:3+len(arr) - 3].decode("UTF-8", 'ignore')
else:
return arr.decode("UTF-8", 'ignore') | [
"[email protected]"
] | |
236c70ae19c2fddd07ffe7943e00c0eec9afbb56 | 6e92586ae20e4c59e9d3d264a3aaf06c6fe3e572 | /user/admin.py | 4bbc827084b60807a8dd34714ce21e2ad3c3db4f | [] | no_license | aktanaktan/hackathon | 93777c51c5442a3540e4318b8bfa9eb8b2402ac5 | ed7e97b6eab8ae2fd61139778ef01965663612fa | refs/heads/master | 2023-05-22T16:23:59.661007 | 2021-06-18T15:51:34 | 2021-06-18T15:51:34 | 377,857,087 | 0 | 2 | null | 2021-06-18T10:21:46 | 2021-06-17T14:22:15 | Python | UTF-8 | Python | false | false | 103 | py | from django.contrib import admin
from user.models import CustomUser
admin.site.register(CustomUser)
| [
"[email protected]"
] | |
89525c39e243900cdcdb007d0a3d31922db4db1d | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_feature_set/feature_set_specification_schema.py | 761c250e8023cf2bbbee0291d9fdcfb5aa45d48e | [
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 660 | py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=unused-argument,no-self-use
from marshmallow import fields, post_load
from azure.ai.ml._schema.core.schema import PatchedSchemaMeta
class FeatureSetSpecificationSchema(metaclass=PatchedSchemaMeta):
path = fields.Str(required=True, allow_none=False)
@post_load
def make(self, data, **kwargs):
from azure.ai.ml.entities._feature_set.feature_set_specification import FeatureSetSpecification
return FeatureSetSpecification(**data)
| [
"[email protected]"
] | |
d3f2ade7125fbc5347a5c7961cb691e2c8b5d211 | a5826609647289ca4c3406a99114da0abacacc2f | /vycontrol/config/models.py | 010b353f1efba2c54df058bcac17af6d89cad9c8 | [
"MIT"
] | permissive | Akmon47/vycontrol | a18f50dcaeaf350525858b20bd33e4d6ac2ee7a0 | 898c2ec0e5d315f89c37f07e87a6bc1b096b0e8e | refs/heads/master | 2023-03-29T16:42:03.361002 | 2021-04-05T07:38:52 | 2021-04-05T07:38:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | from django.db import models
from django.contrib.auth.models import Group
class Instance(models.Model):
hostname = models.CharField(max_length=120, primary_key=True)
alias = models.CharField(max_length=30)
port = models.IntegerField()
key = models.CharField(max_length=100)
https = models.BooleanField()
main = models.BooleanField(default=False)
group = models.ForeignKey(Group, null=True, on_delete=models.SET_NULL)
# extend Django's built-in Group model with an "active" flag via add_to_class
Group.add_to_class('active', models.BooleanField(default=True))
| [
"[email protected]"
] | |
fe0dca761fc20048bba333252839b707f4c21d9d | 1cf380b819a399c3f58a7ad13f5daeb5659cead3 | /wrf_management/modules/compress_cli.py | ddd637fdc060ad1c63957c75b5fa0d272afc416e | [] | no_license | daliagachc/wrf_management | dd88cf5d6279457f4e2b414acfa0d0cbaaad3873 | 4ee88c668ed0252e68713aa756b74344ecada615 | refs/heads/master | 2021-06-13T09:39:08.477315 | 2021-04-09T14:43:21 | 2021-04-09T14:43:21 | 171,271,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | # project name: wrf_management
# created by diego aliaga daliaga_at_chacaltaya.edu.bo
# todo still under construction
import sys
import wrf_management.modules.CompressOut as CO
import os
zip_path = os.path.join(os.path.dirname(sys.argv[1]),'compresser_log_xxx')
input_dic = dict(
source_path = sys.argv[1],
zip_path = zip_path,
db_path = os.path.join(zip_path,f'zip{CO.get_unique_id()}.sqlite'),
lock_last_date = False,
source_path_is_file = True,
compress_level_target = 4
)
if __name__=="__main__":
co = CO.Compresser(**input_dic)
co.get_and_zip_next_row(move=True) | [
"[email protected]"
] | |
dc44b2f609558f860a5b9afec10dfdf9fedc3d50 | 78b7b3e27553ccf0b89c24cbd11662600db26b4c | /ScrapeNASAPicDayWebsite/.history/scraper_20190701160044.py | 59d3cf70adc9b86d844ece27249a8b2220f9b34b | [] | no_license | web3-qa/intermediatePython | 2c23408bd6d6dffc070b92e1155d3c072cfe040c | b4791db2bcb59aaf9c447cf50ffd4d21cacbe16b | refs/heads/master | 2023-02-08T14:18:54.288227 | 2019-07-18T13:31:23 | 2019-07-18T13:31:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | import urllib.request
from urllib.parse import urljoin
from bs4 import BeautifulSoup
baseURL = "http://apod.nasa.gov/apod/archivepix.html"
content = urllib.request.urlopen(baseURL).read()
BeautifulSoup(content, "lxml").findAll("a") | [
"[email protected]"
] | |
4646d73a7577d2a17ca8a2c885d77ab9eac866e3 | 3670f2ca6f5609e14cce8c31cb1348052d0b6358 | /xacro/geometry_tutorials/turtle_tf2/nodes/dynamic_tf2_broadcaster.py | 19ed163e4729d54006864bd9232a374a93da9573 | [] | no_license | jincheng-ai/ros-melodic-python3-opencv4 | b0f4d3860ab7ae3d683ade8aa03e74341eff7fcf | 47c74188560c2274b8304647722d0c9763299a4b | refs/heads/main | 2023-05-28T17:37:34.345164 | 2021-06-17T09:59:25 | 2021-06-17T09:59:25 | 377,856,153 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,038 | py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
import tf2_ros
import geometry_msgs.msg
import math
if __name__ == '__main_static__':  # intentionally never true: static-offset variant kept for reference
rospy.init_node('my_tf2_broadcaster')
br = tf2_ros.TransformBroadcaster()
t = geometry_msgs.msg.TransformStamped()
t.header.frame_id = "turtle1"
t.child_frame_id = "carrot1"
t.transform.translation.x = 0.0
t.transform.translation.y = 2.0
t.transform.translation.z = 0.0
t.transform.rotation.x = 0.0
t.transform.rotation.y = 0.0
t.transform.rotation.z = 0.0
t.transform.rotation.w = 1.0
rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
t.header.stamp = rospy.Time.now()
br.sendTransform(t)
rate.sleep()
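# The live entry point below republishes carrot1 at 10 Hz, moving it around
# turtle1 on a circle of radius 10 (one revolution every 2 seconds).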
if __name__ == '__main__':
rospy.init_node('my_tf2_broadcaster')
br = tf2_ros.TransformBroadcaster()
t = geometry_msgs.msg.TransformStamped()
t.header.frame_id = "turtle1"
t.child_frame_id = "carrot1"
rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
x = rospy.Time.now().to_sec() * math.pi
t.header.stamp = rospy.Time.now()
t.transform.translation.x = 10 * math.sin(x)
t.transform.translation.y = 10 * math.cos(x)
t.transform.translation.z = 0.0
t.transform.rotation.x = 0.0
t.transform.rotation.y = 0.0
t.transform.rotation.z = 0.0
t.transform.rotation.w = 1.0
br.sendTransform(t)
rate.sleep()
| [
"[email protected]"
] | |
2ad9d5f7d2ec70ad70aa35eac7b92eab5a4abe65 | 7a27e217bf51ed0232e1a4cc988687998d72795c | /examples/bad.py | baf1843415058c898dd3d4f8fa99755c6095b180 | [
"MIT"
] | permissive | sobolevn/mypy-test | 3989559a89382b487ac1ed7d4f46365da9acfe10 | 7ab0fa440dee37b441824eb24ac9b0af2ebde9c5 | refs/heads/master | 2023-06-17T15:03:12.648991 | 2021-07-15T12:13:52 | 2021-07-15T12:13:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13 | py | a = 1
a = ""
| [
"[email protected]"
] | |
4aa24890760eacd11b6fda1407e944b639c1e938 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02418/s464405021.py | 0a3f0c6199f7906606d605bc55148ec8cdb0c8dd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | s = input()
p = input()
s = s*2
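# doubling s makes every cyclic rotation of s appear as a substring,
# so "p in s" tests whether p occurs anywhere on the ring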
if p in s:
print('Yes')
else:
print('No') | [
"[email protected]"
] | |
2be0c0e3c831afe398c67d50454cbc3f21f0f197 | 68c4805ad01edd612fa714b1e0d210115e28bb7d | /CoreSource/speechRec.py | 56cbe4229ae9f989177c560c5608cfb22f72d205 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Happy-Egg/redesigned-happiness | ac17a11aecc7459f4ebf0afd7d43de16fb37ae2c | 08b705e3569f3daf31e44254ebd11dd8b4e6fbb3 | refs/heads/master | 2022-12-28T02:40:21.713456 | 2020-03-03T09:04:30 | 2020-03-03T09:04:30 | 204,904,444 | 2 | 1 | Apache-2.0 | 2022-12-08T06:19:04 | 2019-08-28T10:18:05 | Python | UTF-8 | Python | false | false | 561 | py | # coding=utf-8
from aip import AipSpeech
# Baidu AI Cloud account credentials
APP_ID = '17134145'
API_KEY = 'yDTW0ljcQd24ZKyaHYRTDleX'
SECRET_KEY = 'O6de7NZmhxd6KZILjZj2oHoqITdRoHyg'
client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
# read a file's raw bytes
def get_file_content(file_path):
with open(file_path, 'rb') as fp:
return fp.read()
# recognize a local audio file (this calls Baidu ASR, despite the "tts" name)
def do_tts(file_path, file_format):
ret = client.asr(get_file_content(file_path), file_format, 16000, {
'dev_pid': 1537,
})
return ret['result'][0].encode('utf-8')
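# e.g. (illustrative; 'sample.pcm' is a hypothetical 16 kHz mono recording):
# do_tts('sample.pcm', 'pcm') returns the UTF-8 bytes of the top transcript.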
| [
"[email protected]"
] | |
b6dfbdbb85b92e08ea92fb40b6b8d4f6337b17e6 | 525c6a69bcf924f0309b69f1d3aff341b06feb8e | /sunyata/layer/recurrent/gru.py | acaaf611f7d5a7031bf259e99180d2c6b3d077f7 | [] | no_license | knighton/sunyata_2017 | ba3af4f17184d92f6277d428a81802ac12ef50a4 | 4e9d8e7d5666d02f9bb0aa9dfbd16b7a8e97c1c8 | refs/heads/master | 2021-09-06T13:19:06.341771 | 2018-02-07T00:28:07 | 2018-02-07T00:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,559 | py | from ... import backend as Z
from ... import init
from ..base import LinkBuilder
from .base import RecurrentLayer, RecurrentSpec
class GRULayer(RecurrentLayer):
def __init__(self, forward, last, input_kernel, recurrent_kernel, bias):
dim = input_kernel.shape[1] // 3
dtype = input_kernel.dtype.name
super().__init__(dim, dtype, forward, last)
self.input_kernel = self.add_param(input_kernel)
self.recurrent_kernel = self.add_param(recurrent_kernel)
self.bias = self.add_param(bias)
i = 2 * self.out_dim
self.reset_update_input_kernel = self.input_kernel[:, :i]
self.reset_update_recurrent_kernel = self.recurrent_kernel[:, :i]
self.reset_update_bias = self.bias[:i]
self.new_input_kernel = self.input_kernel[:, i:]
self.new_recurrent_kernel = self.recurrent_kernel[:, i:]
self.new_bias = self.bias[i:]
def step(self, x, prev_state, prev_internal_state):
gates = Z.sigmoid(
Z.matmul(x, self.reset_update_input_kernel) +
Z.matmul(prev_state, self.reset_update_recurrent_kernel) +
self.reset_update_bias)
i = self.out_dim
reset_gate = gates[:, :i]
update_gate = gates[:, i:2 * i]
new_state = Z.tanh(
Z.matmul(x, self.new_input_kernel) +
Z.matmul(reset_gate * prev_state, self.new_recurrent_kernel) +
self.new_bias)
state = update_gate * prev_state + (1 - update_gate) * new_state
return state, None
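
# `step` above in equation form (r = reset gate, z = update gate):
#   [r_t, z_t] = sigmoid(x_t W_rz + h_{t-1} U_rz + b_rz)
#   n_t        = tanh(x_t W_n + (r_t * h_{t-1}) U_n + b_n)
#   h_t        = z_t * h_{t-1} + (1 - z_t) * n_t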
class GRUSpec(RecurrentSpec):
def __init__(self, dim=None, forward=True, last=False,
input_kernel_init='glorot_uniform',
recurrent_kernel_init='orthogonal', bias_init='zeros'):
super().__init__(dim, forward, last)
self.input_kernel_init = init.get(input_kernel_init)
self.recurrent_kernel_init = init.get(recurrent_kernel_init)
self.bias_init = init.get(bias_init)
def make_layer(self, in_dim, out_dim, dtype):
input_kernel_shape = in_dim, 3 * out_dim
input_kernel = self.input_kernel_init(
input_kernel_shape, dtype, 'conv_kernel')
recurrent_kernel_shape = out_dim, 3 * out_dim
recurrent_kernel = self.recurrent_kernel_init(
recurrent_kernel_shape, dtype)
bias_shape = 3 * out_dim,
bias = self.bias_init(bias_shape, dtype)
return GRULayer(self.go_forward, self.ret_last, input_kernel,
recurrent_kernel, bias)
GRU = LinkBuilder(GRUSpec)
| [
"[email protected]"
] | |
4a0ccc2edeb0ad94a72db5238966a8e5fe4d0216 | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/networks_utils.py | 20c83238fe96135876a0ed78af16ad4cb15c5574 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 1,052 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code that's shared between multiple networks subcommands."""
def _GetNetworkMode(network):
"""Takes a network resource and returns the "mode" of the network."""
if network.get('IPv4Range', None) is not None:
return 'legacy'
if network.get('autoCreateSubnetworks', False):
return 'auto'
else:
return 'custom'
def AddMode(items):
for resource in items:
resource['x_gcloud_mode'] = _GetNetworkMode(resource)
yield resource
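
# e.g. (illustrative): AddMode(list_of_network_dicts) lazily annotates each
# dict with resource['x_gcloud_mode'] in {'legacy', 'auto', 'custom'}.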
| [
"[email protected]"
] | |
b1125a3e2b251056d0f0fe3c80abe500ca8d6117 | 3ae62276c9aad8b9612d3073679b5cf3cb695e38 | /easyleetcode/leetcodes/Leetcode_685_Redundant_Connection_II.py | 13fe787471a52795e9edcc8704bc9384895a7294 | [
"Apache-2.0"
] | permissive | gongtian1234/easy_leetcode | bc0b33c3c4f61d58a6111d76707903efe0510cb4 | d2b8eb5d2cafc71ee1ca633ce489c1a52bcc39ce | refs/heads/master | 2022-11-16T17:48:33.596752 | 2020-07-13T02:55:03 | 2020-07-13T02:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | print('Leetcode_685_Redundant_Connection_II') | [
"[email protected]"
] | |
aeccd1e46dde4148b0683a43aaf35d79b4119967 | 5932b41c973fb4f0d61ea2668a3036bd2af31903 | /ue4docker/infrastructure/BuildConfiguration.py | 30d53fc7710820b4646512a3355a42b508a2ec20 | [
"MIT"
] | permissive | hackertron/ue4-docker | d711290fba75cfdf4509762cd301dec7796191f5 | f849ae89f75644c5f34276e8ebe76ef03528029c | refs/heads/master | 2020-04-19T22:51:26.502673 | 2019-01-29T08:48:54 | 2019-01-29T08:48:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,589 | py | from .PackageUtils import PackageUtils
from .WindowsUtils import WindowsUtils
import humanfriendly, os, platform, random
# Import the `semver` package even when the conflicting `node-semver` package is present
semver = PackageUtils.importFile('semver', os.path.join(PackageUtils.getPackageLocation('semver'), 'semver.py'))
# The default Unreal Engine git repository
DEFAULT_GIT_REPO = 'https://github.com/EpicGames/UnrealEngine.git'
# The base images for Linux containers
LINUX_BASE_IMAGES = {
'opengl': 'nvidia/opengl:1.0-glvnd-devel-ubuntu18.04',
'cudagl': {
'9.2': 'nvidia/cudagl:9.2-devel-ubuntu18.04',
'10.0': 'nvidia/cudagl:10.0-devel-ubuntu18.04'
}
}
# The default CUDA version to use when `--cuda` is specified without a value
DEFAULT_CUDA_VERSION = '9.2'
# The default memory limit (in GB) under Windows
DEFAULT_MEMORY_LIMIT = 10.0
class BuildConfiguration(object):
def __init__(self, args):
'''
Creates a new build configuration based on the supplied arguments object
'''
# Determine if we are building a custom version of UE4 rather than an official release
if args.release.lower() == 'custom':
# Both a custom repository and a custom branch/tag must be specified
if args.repo is None or args.branch is None:
raise RuntimeError('both a repository and branch/tag must be specified when building a custom version of the Engine')
# Use the specified repository and branch/tag
self.release = 'custom'
self.repository = args.repo
self.branch = args.branch
else:
# Validate the specified version string
try:
ue4Version = semver.parse(args.release)
if ue4Version['major'] != 4 or ue4Version['prerelease'] != None:
raise Exception()
self.release = semver.format_version(ue4Version['major'], ue4Version['minor'], ue4Version['patch'])
except:
raise RuntimeError('invalid UE4 release number "{}", full semver format required (e.g. "4.19.0")'.format(args.release))
# Use the default repository and the release tag for the specified version
self.repository = DEFAULT_GIT_REPO
self.branch = '{}-release'.format(self.release)
# Store our common configuration settings
self.containerPlatform = 'windows' if platform.system() == 'Windows' and args.linux == False else 'linux'
self.dryRun = args.dry_run
self.rebuild = args.rebuild
self.pullPrerequisites = args.pull_prerequisites
self.noEngine = args.no_engine
self.noMinimal = args.no_minimal
self.noFull = args.no_full
self.suffix = args.suffix
self.platformArgs = ['--no-cache'] if args.no_cache == True else []
self.baseImage = None
self.prereqsTag = None
# If we're building Windows containers, generate our Windows-specific configuration settings
if self.containerPlatform == 'windows':
self._generateWindowsConfig(args)
# If we're building Linux containers, generate our Linux-specific configuration settings
if self.containerPlatform == 'linux':
self._generateLinuxConfig(args)
def _generateWindowsConfig(self, args):
# Store the path to the directory containing our required Windows DLL files
self.defaultDllDir = os.path.join(os.environ['SystemRoot'], 'System32')
self.dlldir = args.dlldir if args.dlldir is not None else self.defaultDllDir
# Determine base tag for the Windows release of the host system
self.hostRelease = WindowsUtils.getWindowsRelease()
self.hostBasetag = WindowsUtils.getReleaseBaseTag(self.hostRelease)
# Store the tag for the base Windows Server Core image
self.basetag = args.basetag if args.basetag is not None else self.hostBasetag
self.baseImage = 'microsoft/dotnet-framework:4.7.2-sdk-windowsservercore-' + self.basetag
self.prereqsTag = self.basetag
# Verify that any user-specified base tag is valid
		if not WindowsUtils.isValidBaseTag(self.basetag):
raise RuntimeError('unrecognised Windows Server Core base image tag "{}", supported tags are {}'.format(self.basetag, WindowsUtils.getValidBaseTags()))
# Set the memory limit Docker flags
if args.m is not None:
try:
self.memLimit = humanfriendly.parse_size(args.m) / (1000*1000*1000)
except:
raise RuntimeError('invalid memory limit "{}"'.format(args.m))
else:
			self.memLimit = DEFAULT_MEMORY_LIMIT if not args.random_memory else random.uniform(DEFAULT_MEMORY_LIMIT, DEFAULT_MEMORY_LIMIT + 2.0)
self.platformArgs.extend(['-m', '{:.2f}GB'.format(self.memLimit)])
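		# e.g. the default 10.0 GB limit becomes the Docker build flags ['-m', '10.00GB']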
# Set the isolation mode Docker flags
self.isolation = args.isolation if args.isolation is not None else 'default'
if self.isolation != 'default':
			self.platformArgs.append('--isolation=' + self.isolation)
# Set the PDB truncation Docker flags
self.keepDebug = args.keep_debug
		if self.keepDebug:
self.platformArgs.extend(['--build-arg', 'KEEP_DEBUG=1'])
def _generateLinuxConfig(self, args):
# Determine if we are building CUDA-enabled container images
self.cuda = None
if args.cuda is not None:
# Verify that the specified CUDA version is valid
self.cuda = args.cuda if args.cuda != '' else DEFAULT_CUDA_VERSION
if self.cuda not in LINUX_BASE_IMAGES['cudagl']:
raise RuntimeError('unsupported CUDA version "{}", supported versions are: {}'.format(
self.cuda,
', '.join([v for v in LINUX_BASE_IMAGES['cudagl']])
))
# Use the appropriate base image for the specified CUDA version
self.baseImage = LINUX_BASE_IMAGES['cudagl'][self.cuda]
self.prereqsTag = 'cudagl{}'.format(self.cuda)
else:
self.baseImage = LINUX_BASE_IMAGES['opengl']
self.prereqsTag = 'opengl'
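# Usage sketch (not part of the original module; `args` is assumed to be an
# argparse.Namespace exposing the fields read above, e.g. release, repo, branch,
# linux, dry_run, rebuild, no_cache, cuda and the Windows-only options):
#
#   config = BuildConfiguration(parser.parse_args())
#   print(config.baseImage, config.prereqsTag, config.platformArgs)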
| [
"[email protected]"
] | |
c3503b21d2104e0e4b8f3d87ebe23954c6c35551 | d12cadea9d18ec6599b2ef650ce6af1a0d39afb1 | /Server.py | 5b0908b7852dfeadf2bb64c4853ad4c9596f6219 | [] | no_license | zhantong/wetwo-server | 074facf45695f054855eaff26bd2dc3272a1aba0 | 7271acc9047da65d647a572160683b043b8163a1 | refs/heads/master | 2021-08-27T19:41:40.549066 | 2017-11-28T05:19:50 | 2017-11-28T05:19:50 | 103,080,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,088 | py | from flask import Flask, request, render_template, redirect, jsonify, g
import flask_login
from WeTwo import WeTwo
app = Flask(__name__)
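# NOTE: the secret key is hard-coded below; in a real deployment it should be
# loaded from configuration or the environment rather than committed to source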
app.secret_key = '\xecG>\xc3\xe6\xe5\xbds\xa5\xf1\xae\x81u\x19\xb0`\x88W\xc6\\\xb7\xfeL\xcc'
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
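# Minimal user model: flask-login only needs an object with a unique `id`
# attribute; UserMixin supplies the standard is_authenticated/is_active helpers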
class User(flask_login.UserMixin):
pass
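# Lazily creates a single WeTwo instance per request context and caches it on
# flask.g, so all handlers within one request share the same backend object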
def get_wetwo():
if not hasattr(g, 'wetwo'):
g.wetwo = WeTwo()
return g.wetwo
@login_manager.user_loader
def user_loader(user_id):
if not get_wetwo().is_user_id_exists(user_id):
return
user = User()
user.id = user_id
return user
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('login.html')
user_name = request.form['name']
password = request.form['password']
if user_name and password and get_wetwo().is_password_correct(user_name=user_name, password=password):
user_id = get_wetwo().get_user_id(user_name)
user = User()
user.id = user_id
flask_login.login_user(user)
return 'Login Success'
return 'Bad Login'
@app.route('/api/login', methods=['POST'])
def api_login():
user_name = request.form['name']
password = request.form['password']
if user_name and password and get_wetwo().is_password_correct(user_name=user_name, password=password):
user_id = get_wetwo().get_user_id(user_name)
user = User()
user.id = user_id
flask_login.login_user(user)
        return jsonify({'status': True, 'message': 'Login succeeded'})
    return jsonify({'status': False, 'message': 'Login failed'})
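# Example request (sketch; credentials are illustrative, default Flask port assumed):
#   curl -d 'name=alice' -d 'password=secret' http://localhost:5000/api/login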
@app.route('/logout')
def logout():
flask_login.logout_user()
return 'Logged out'
@app.route('/api/logout')
def api_logout():
flask_login.logout_user()
    return jsonify({'status': True, 'message': 'Logged out successfully'})
@app.route('/')
@flask_login.login_required
def index():
articles = get_wetwo().get_articles()
for article in articles:
article['comments'] = get_wetwo().get_comments(article['article_id'])
return render_template('index.html', articles=articles)
@app.route('/api/getUserInfo')
@flask_login.login_required
def api_get_user_info():
user_id = flask_login.current_user.id
user_name = get_wetwo().get_user_name(user_id)
num_unread_notifications = get_wetwo().get_num_unread_comments(user_id)
info = {
'id': user_id,
'name': user_name,
'num_unread_notifications': num_unread_notifications
}
return jsonify(info)
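# Example response shape (sketch; values are illustrative):
#   {"id": 1, "name": "alice", "num_unread_notifications": 2}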
@app.route('/api/getAllArticles')
@flask_login.login_required
def api_get_all_articles():
    offset = request.args.get('offset', 0)
    limit = request.args.get('limit', 20)
articles = get_wetwo().get_articles(offset=offset, limit=limit)
for article in articles:
article['comments'] = get_wetwo().get_comments(article['article_id'])
return jsonify(articles)
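# e.g. GET /api/getAllArticles?offset=20&limit=20 returns the second page of 20 articles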
@app.route('/api/getArticles')
@flask_login.login_required
def api_get_articles():
user_id = flask_login.current_user.id
articles = get_wetwo().get_articles(user_id)
for article in articles:
article['comments'] = get_wetwo().get_comments(article['article_id'])
return jsonify(articles)
@app.route('/api/getArticle')
@flask_login.login_required
def api_get_article():
article_id = request.args['articleId']
article = get_wetwo().get_article(article_id)
article['comments'] = get_wetwo().get_comments(article['article_id'])
return jsonify(article)
@app.route('/postArticle', methods=['POST'])
@flask_login.login_required
def post_article():
article = request.form['article']
user_id = flask_login.current_user.id
article_id = get_wetwo().post_article(article, user_id)
return redirect('/')
@app.route('/api/postArticle', methods=['POST'])
@flask_login.login_required
def api_post_article():
article = request.form['article']
    time = request.form.get('time')
user_id = flask_login.current_user.id
article_id = get_wetwo().post_article(article, user_id, time)
return jsonify({'status': True, 'articleId': article_id})
@app.route('/postComment', methods=['POST'])
@flask_login.login_required
def post_comment():
article_id = request.form['articleId']
comment = request.form['comment']
parent_comment_id = request.form['parentCommentId']
user_id = flask_login.current_user.id
get_wetwo().post_comment(article_id, user_id, comment, parent_comment_id)
article = get_wetwo().get_article(article_id)
article['comments'] = get_wetwo().get_comments(article_id)
return render_template('comment.html', article=article)
@app.route('/api/postComment', methods=['POST'])
@flask_login.login_required
def api_post_comment():
article_id = request.form['articleId']
comment = request.form['comment']
parent_comment_id = request.form['parentCommentId']
    time = request.form.get('time')
    user_id = request.form.get('userId', flask_login.current_user.id)
comment_id = get_wetwo().post_comment(article_id, user_id, comment, parent_comment_id, time)
return jsonify({'status': True, 'commentId': comment_id})
@app.route('/api/getUnreadComments')
@flask_login.login_required
def api_get_unread_comments():
user_id = flask_login.current_user.id
comments = get_wetwo().get_unread_comments(user_id)
return jsonify(comments)
@app.route('/api/setCommentRead', methods=['POST'])
@flask_login.login_required
def api_set_comment_read():
comment_id = request.form['commentId']
get_wetwo().set_comment_read(comment_id)
return jsonify({'status': True})
@app.route('/protected')
@flask_login.login_required
def protected():
return 'Logged in as: ' + flask_login.current_user.id
@app.teardown_appcontext
def teardown_appcontext(error):
if hasattr(g, 'wetwo'):
del g.wetwo
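# Development entry point: binds to all interfaces with Flask's debug server enabled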
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| [
"[email protected]"
] |