Dataset schema (column: type, observed range of values):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3 to 616
- content_id: string, length 40
- detected_licenses: sequence, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 777 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
- gha_language: string, 149 classes
- src_encoding: string, 26 classes
- language: string, 1 class
- is_vendor: bool
- is_generated: bool
- length_bytes: int64, 3 to 10.2M
- extension: string, 188 classes
- content: string, length 3 to 10.2M
- authors: sequence, length 1
- author_id: string, length 1 to 132
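Each row below lists these fields in this order, separated by `|`, with the `content` field holding the full source file. As a quick orientation, here is a minimal sketch of how rows with this schema might be loaded and inspected with the Hugging Face `datasets` library; the repository path `org/python-code-dump` is a placeholder, not the actual source.

```python
from datasets import load_dataset  # assumes the `datasets` package is installed

# Hypothetical dataset path -- substitute the real repository.
ds = load_dataset("org/python-code-dump", split="train")

row = ds[0]
print(row["repo_name"], row["path"], row["license_type"])
print(row["content"][:200])  # first 200 characters of the source file
```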
cc94576c94c792df77ee28ae73dd6f41f0c2d08b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_065/ch59_2020_03_04_19_22_17_952459.py | c7342597c4c20377297e4677c63dc63c883b744b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | def asteriscos(n):
result = '*' * n
return result | [
"[email protected]"
] | |
96b751bafee5bfec57c1900b3f0737d33f666c7b | 729ee5bcb31708a82b08509775786597dac02263 | /coding-challenges/week09/day05/ccQ1.py | 01507bc127c3a7c3790250ee8b5756ef255aa621 | [] | no_license | pandey-ankur-au17/Python | 67c2478316df30c2ac8ceffa6704cf5701161c27 | 287007646a694a0dd6221d02b47923935a66fcf4 | refs/heads/master | 2023-08-30T05:29:24.440447 | 2021-09-25T16:07:23 | 2021-09-25T16:07:23 | 358,367,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | """
Q-1) Squares of a Sorted Array (5 marks) (easy)
https://leetcode.com/problems/squares-of-a-sorted-array/
Given an integer array nums sorted in non-decreasing order, return an array of the
squares of each number sorted in non-decreasing order.
Example 1:
Input: nums = [-4,-1,0,3,10]
Output: [0,1,9,16,100]
Explanation: After squaring, the array becomes [16,1,0,9,100].
After sorting, it becomes [0,1,9,16,100].
"""
def SortedArray(nums):
    # Two-pointer scan: the largest square is always at one of the two ends
    # of the sorted input, so fill the result array from the back.
    n = len(nums)
    i = 0
    j = n - 1
    k = n - 1
    result = list(range(n))  # placeholder list of length n, overwritten below
while i <= j:
SqrNg = nums[i] * nums[i]
SqrPo = nums[j] * nums[j]
if SqrNg < SqrPo:
result[k] = SqrPo
j = j - 1
else:
result[k] = SqrNg
i = i + 1
k = k - 1
return result
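# Added note: a simpler alternative would be `sorted(x * x for x in nums)`,
# which runs in O(n log n); the two-pointer scan above achieves O(n).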
if __name__ == "__main__":
nums = [-4,-1,0,3,10]
res = SortedArray(nums)
print(res) | [
"[email protected]"
] | |
feed39e1f437c4d336656b405b1148f3b07bb364 | cfc7eed97d4987dbe80026205b7a127f89974d51 | /ebcli/controllers/codesource.py | 6fc3968ac2ad924babbabd2783fc67143c6b4fbd | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | stefansundin/awsebcli | bf71872328c4d94f073d5d0ae0740a0316d56fcf | 8e17c8ad3d24e3c4cef9a4c5dfc6cae61bd7066d | refs/heads/main | 2022-12-06T06:34:52.601029 | 2022-02-04T05:40:53 | 2022-11-20T01:38:26 | 230,182,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ebcli.lib import utils
from ebcli.core import io
from ebcli.core.abstractcontroller import AbstractBaseController
from ebcli.resources.strings import strings, flag_text, prompts
from ebcli.operations import gitops
class CodeSourceController(AbstractBaseController):
class Meta(AbstractBaseController.Meta):
label = 'codesource'
description = strings['codesource.info']
arguments = [
(
['sourcename'],
dict(
action='store',
nargs='?',
help=flag_text['codesource.sourcename'],
choices=['codecommit', 'local'],
type=str.lower
)
),
]
usage = 'eb codesource <sourcename> [options ...]'
def do_command(self):
sourcename = self.app.pargs.sourcename
if sourcename is not None:
if sourcename == 'local':
gitops.print_current_codecommit_settings()
self.set_local()
if sourcename == 'codecommit':
self.set_codecommit()
else:
self.prompt_for_codesource()
def prompt_for_codesource(self):
gitops.print_current_codecommit_settings()
io.echo(prompts['codesource.codesourceprompt'])
setup_choices = ['CodeCommit', 'Local']
choice = utils.prompt_for_item_in_list(setup_choices, 2)
if choice == setup_choices[0]:
self.set_codecommit()
elif choice == setup_choices[1]:
self.set_local()
def set_local(self):
gitops.disable_codecommit()
io.echo(strings['codesource.localmsg'])
def set_codecommit(self):
gitops.initialize_codecommit()
| [
"[email protected]"
] | |
c7fbb95fa05343cc561f50c34178cda5f263255f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_363/ch18_2020_09_16_12_12_05_478212.py | d5e7f259a6b779b713536a1cdce9be08e76ba7cf | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | def testa_maioridade(idade):
if idade >= 21:
return 'Liberado EUA e BRASIL'
else:
if idade >= 18:
return 'Liberado BRASIL'
else:
return 'Não está liberado'
print(testa_maioridade(17))
print(testa_maioridade(20))
print(testa_maioridade(21)) | [
"[email protected]"
] | |
85dd60d1a0c3316bda5a5dcf3306e7bf740b7417 | b07c4f4b99a46689a650d52bf1bd1d32160f06d3 | /tests/test_cps324.py | f14fcdd50b208eaae6ee51e93dfb35fd723dfb9a | [] | no_license | nuxeo-cps/products--CPSUpgradeTests | 2d67652c26fc212c9ec9864a76b0a7b1f819e2c9 | e3b1f94eaf78278b529561b2384ea3a3479123b3 | refs/heads/main | 2023-01-22T00:46:51.434789 | 2006-09-02T08:22:30 | 2006-09-02T08:22:30 | 317,994,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | # Upgrade from CPS 3.2.4
DB_NAME = 'cps324'
import os
import unittest
# Warning, nifty tapdance ahead:
# When you import testing, it sets testing home to
# $SOFTWARE_HOME/lib/python/Testing
import Testing
# But we want it to be in a directory with our custom_zodb.py, so we set it,
# but only after importing Testing (or it will be reset later).
import App.config
cfg = App.config.getConfiguration()
cfg.testinghome = os.path.join(os.path.dirname(__file__), DB_NAME)
# During the import of the ZopeLite module, the Zope Application will be
# started, and it will now use our testinghome, find our custom_zodb.py and
# use our custom ZODB.
# Actually, we import upgradetestcase, which in turn imports ZopeTestCase,
# which in turn imports ZopeLite, which in turns starts Zope.
from upgradetestcase import PreGenericSetupTestCase
# Tapdance ends.
class TestUpgrade(PreGenericSetupTestCase):
db_dir = DB_NAME
def test_upgrade(self):
self._upgrade()
self._verifyDocument()
self._verifyPublishing()
self._verifyCalendaring()
self._verifyNewsItem()
self._checkSubGroupSupport()
self._verifyFolderDestruction()
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(TestUpgrade),
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| [
"devnull@localhost"
] | devnull@localhost |
8c09c475eebebeba17d6965c5c16882309111a9f | b8441dc1987be9e64fa3081d456b2a3060ec44d1 | /mars/core/graph/builder/tileable.py | ddfbf93711c35982d8d457f21204d791adbbb977 | [
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mars-project/mars | f99fefbce999d58a9249bc72046787a9731c9c73 | c36c53fa22e10ef9477d9c454401a2f281375f31 | refs/heads/master | 2023-07-23T00:23:55.133015 | 2023-07-03T11:44:54 | 2023-07-03T11:44:54 | 160,543,708 | 2,704 | 362 | Apache-2.0 | 2023-09-11T07:57:35 | 2018-12-05T16:04:03 | Python | UTF-8 | Python | false | false | 1,230 | py | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Generator
from ...mode import enter_mode
from ..entity import TileableGraph, ChunkGraph
from .base import AbstractGraphBuilder
class TileableGraphBuilder(AbstractGraphBuilder):
_graph: TileableGraph
def __init__(self, graph: TileableGraph):
super().__init__(graph=graph)
@enter_mode(build=True, kernel=True)
def _build(self) -> Union[TileableGraph, ChunkGraph]:
self._add_nodes(self._graph, list(self._graph.result_tileables), set())
return self._graph
def build(self) -> Generator[Union[TileableGraph, ChunkGraph], None, None]:
yield self._build()
| [
"[email protected]"
] | |
e38060a8c7d9bb18f3deb109b85e49558db91fda | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/147/61122/submittedfiles/testes.py | 06774e42c5ec729f01f08e760f84be3690f8d627 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | # -*- coding: utf-8 -*-
# START HERE BELOW
n = int(input('digite n:'))
x1 = n // 1000        # first (thousands) digit of a 4-digit number
x2 = (n // 100) % 10  # second (hundreds) digit
print(x1)
print(x2)
| [
"[email protected]"
] | |
48fd13cd46e26454f058944a362e8996ca192344 | 2edf3a0d21117c65dffe87c3da81365c77d66679 | /dfirtrack_main/tests/system/test_system_importer_file_csv_config_based_forms.py | baa1cddf83741025adb6aacefe2ee628c2689cb3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | fxcebx/dfirtrack | 003748305aa412aa9ec043faa98dac45d3053b5c | 20acf4e508aeef9faf2ed1d2195918b6640c1307 | refs/heads/master | 2022-12-10T02:25:47.676855 | 2020-09-24T23:15:42 | 2020-09-24T23:15:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from dfirtrack_main.importer.file.csv_importer_forms import SystemImporterFileCsvConfigbasedForm
class SystemImporterFileCsvConfigbasedFormTestCase(TestCase):
""" system importer file CSV config-based form tests """
def test_system_importer_file_csv_config_based_systemcsv_form_label(self):
""" test form label """
# get object
form = SystemImporterFileCsvConfigbasedForm()
# compare
self.assertEqual(form.fields['systemcsv'].label, 'CSV with systems (*)')
def test_system_importer_file_csv_config_based_form_empty(self):
""" test minimum form requirements / INVALID """
# get object
form = SystemImporterFileCsvConfigbasedForm(data = {})
# compare
self.assertFalse(form.is_valid())
def test_system_importer_file_csv_config_based_systemcsv_form_filled(self):
""" test minimum form requirements / VALID """
# get file
upload_csv = open('example_data/dfirtrack_main_importer_file_csv_system__valid.csv', 'rb')
# create dictionaries
data_dict = {}
file_dict = {
'systemcsv': SimpleUploadedFile(upload_csv.name, upload_csv.read()),
}
# get object
form = SystemImporterFileCsvConfigbasedForm(
data = data_dict,
files = file_dict,
)
# close file
upload_csv.close()
# compare
self.assertTrue(form.is_valid())
| [
"[email protected]"
] | |
7c34356fc7693cae881d92047c8d025ff83373d7 | 41f548fc3052d4cd3a94e3171a0e2120705ed760 | /Gomine_DOC_Unicode/Old_crawl/shiye/shiye/items.py | ecb978c4f13f93ff5406aee5a8d1ec921ae69426 | [] | no_license | SuperShen9/Scrapy | 806f972bcd05d85bf02349c5ee7711af550c8568 | cbe141f697596d5a384bb968d7343194236a541f | refs/heads/master | 2021-01-19T13:04:19.957911 | 2018-06-27T23:47:21 | 2018-06-27T23:47:21 | 88,060,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ShiyeItem(scrapy.Item):
# define the fields for your item here like:
    name = scrapy.Field()
    code = scrapy.Field()
    url = scrapy.Field()
| [
"[email protected]"
] | |
e984ed448f3a0a8dc1424728498e0d9e98beb857 | 0032c98333ffc0efdb920ecca31ab224378880e5 | /rpi-tutorial/Servo2.py | a25b57ffaefc303c79cc41c4e84ef8fd55d8d646 | [] | no_license | raspibrick/install | bd1c6f9a8cb524f2ab5a2c17ad8c5463b768dffa | 96288d6ca21abd8fb993cc376e37c16473b54dd5 | refs/heads/master | 2021-01-10T05:00:39.159879 | 2019-07-25T09:46:04 | 2019-07-25T09:46:04 | 40,703,681 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | # Servo2.py
# Two servo motors driven by PCA9685 chip
from smbus import SMBus
from PCA9685 import PWM
import time
i2c_address = 0x40
fPWM = 50
channel = 1
a = 8.5  # duty-cycle span (percent) across 180 degrees of travel
b = 3    # duty cycle (percent) at 0 degrees
def setup():
global pwm
bus = SMBus(1) # Raspberry Pi revision 2
pwm = PWM(bus, i2c_address)
pwm.setFreq(fPWM)
def setDirection(direction):
    duty = a / 180 * direction + b  # linear map: 0..180 deg -> b..(a+b) percent
pwm.setDuty(channel, duty)
print "direction =", direction, "-> duty =", duty
time.sleep(0.5) # allow to settle
print "starting"
setup()
channel = 0
for direction in range(0, 91, 10):
setDirection(direction)
direction = 0
setDirection(0)
channel = 1
for direction in range(0, 91, 10):
setDirection(direction)
direction = 0
setDirection(0)
print "done"
| [
"[email protected]"
] | |
d0585631be5a98578e7397c70df0b3441eda5577 | 72d6b3ab3fc2c7014967a156de082d1c617cbf0f | /操作数据库/使用Flask连接MySQL_将话务数据入库.py | 04fe27cd42ae98af04094148fdc468a3a171760e | [] | no_license | fengmingshan/python | 19a1732591ad061a8291c7c84e6f00200c106f38 | b35dbad091c9feb47d1f0edd82e568c066f3c6e9 | refs/heads/master | 2021-06-03T08:35:50.019745 | 2021-01-19T15:12:01 | 2021-01-19T15:12:01 | 117,310,092 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,285 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 10:16:45 2020
@author: Administrator
"""
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
import pandas as pd
import os
work_path = 'd:/_python/python/操作数据库/'
os.chdir(work_path)
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql+pymysql://root:a123456@localhost:3306/eric_traffic?charset=utf8"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
# Create the database object
db = SQLAlchemy(app)
#db = SQLAlchemy(app, use_native_unicode='utf8')
title = ['key',
'week',
'eNodeB',
'EUTRANCELLFDD',
'Acc_WirelessConnSucRate',
'Acc_ERAB_droppingrate',
'AirInterface_Traffic_Volume_UL_MBytes',
'AirInterface_Traffic_Volume_DL_MBytes',
'Int_DownlinkLatency',
'MaxnumberofUEinRRc',
'pmCellDowntimeAuto1',
'pmCellDowntimeMan1',
'Data_Coverage',
'Ava_CellAvail',
'NumofLTERedirectto3G',
'AvgNumberofULActiveUsers',
'AvgNumberofDLActiveUsers',
'DL_Util_of_PRB',
'DLactiveuesum',
'CellPDCPDLbit',
'AvgUserFellThroughput_Mbps'
]
df_eric = pd.read_csv('./爱立信0224-0301_mini.csv', header=None, names=title)
df_eric = df_eric[['key',
'week',
'eNodeB',
'EUTRANCELLFDD',
'Acc_WirelessConnSucRate',
'Acc_ERAB_droppingrate',
'AirInterface_Traffic_Volume_UL_MBytes',
'AirInterface_Traffic_Volume_DL_MBytes',
'Int_DownlinkLatency',
'MaxnumberofUEinRRc',
'AvgNumberofULActiveUsers',
'AvgNumberofDLActiveUsers',
'DL_Util_of_PRB',
'AvgUserFellThroughput_Mbps'
]]
# Model class that maps to a table in the database.
class Eric_day(db.Model):
    # declare the table name
    __tablename__ = 'eric_day'
    # column definitions
key = db.Column(db.String(200), primary_key=True)
week = db.Column(db.Integer)
eNodeB = db.Column(db.String(200))
EUTRANCELLFDD = db.Column(db.String(200))
Acc_WirelessConnSucRate = db.Column(db.Float)
Acc_ERAB_droppingrate = db.Column(db.Float)
AirInterface_Traffic_Volume_UL_MBytes = db.Column(db.Float)
AirInterface_Traffic_Volume_DL_MBytes = db.Column(db.Float)
Int_DownlinkLatency = db.Column(db.Float)
MaxnumberofUEinRRc = db.Column(db.Integer)
AvgNumberofULActiveUsers = db.Column(db.Float)
AvgNumberofDLActiveUsers = db.Column(db.Float)
DL_Util_of_PRB = db.Column(db.Float)
AvgUserFellThroughput_Mbps = db.Column(db.Float)
def __repr__(self):
return '<User key: {}, week: {}, eNodeB: {}, EUTRANCELLFDD: {}, Acc_WirelessConnSucRate: {}, Acc_ERAB_droppingrate: {}>'.format(
self.key, self.week, self.eNodeB, self.EUTRANCELLFDD, self.Acc_WirelessConnSucRate, self.Acc_ERAB_droppingrate)
#db.drop_all()
db.create_all()
# =============================================================================
# Import the data
# =============================================================================
traffic_data = [Eric_day(
key=key,
week=wk,
eNodeB=enb,
EUTRANCELLFDD=cell,
Acc_WirelessConnSucRate=accrate,
Acc_ERAB_droppingrate=drop,
AirInterface_Traffic_Volume_UL_MBytes=uth,
AirInterface_Traffic_Volume_DL_MBytes=dth,
Int_DownlinkLatency=lat,
MaxnumberofUEinRRc=mrrc,
AvgNumberofULActiveUsers=uact,
AvgNumberofDLActiveUsers=dact,
DL_Util_of_PRB=prb,
AvgUserFellThroughput_Mbps=fell
) for key,wk, enb, cell, accrate, drop, uth, dth, lat, mrrc, uact, dact, prb, fell in zip(
df_eric['key'],
df_eric['week'],
df_eric['eNodeB'],
df_eric['EUTRANCELLFDD'],
df_eric['Acc_WirelessConnSucRate'],
df_eric['Acc_ERAB_droppingrate'],
df_eric['AirInterface_Traffic_Volume_UL_MBytes'],
df_eric['AirInterface_Traffic_Volume_DL_MBytes'],
df_eric['Int_DownlinkLatency'],
df_eric['MaxnumberofUEinRRc'],
df_eric['AvgNumberofULActiveUsers'],
df_eric['AvgNumberofDLActiveUsers'],
df_eric['DL_Util_of_PRB'],
df_eric['AvgUserFellThroughput_Mbps']
)]
for item in traffic_data:
db.session.add(item)
db.session.commit()
# Raw SQL approach
#db.session.execute(r'insert into user values (8, "wjz", "test123")')
#db.session.execute(r'insert into user values (9, "wjz", "admin123")')
#
#db.session.commit()
# =============================================================================
# Query the table
# =============================================================================
# ORM approach
btslist = Eric_day.query.order_by('eNodeB').all()
# Print each row using the __repr__ format defined on the class
for bts in btslist:
print(bts)
# Print with a custom format
for bts in btslist:
print(bts.week, ' ', bts.eNodeB, ' ', bts.EUTRANCELLFDD, ' ', bts.Acc_WirelessConnSucRate, ' ', bts.Acc_ERAB_droppingrate)
# Raw SQL statement (recommended)
item = db.session.execute('select * from eric_day order by eNodeB asc')
# coerce the result set to a list
item = list(item)
for i in item:
print(i)
# =============================================================================
# Delete rows
# =============================================================================
# ORM approach
# User.query.filter_by(id=6).delete()
# User.query.filter_by(id=7).delete()
# User.query.filter_by(id=8).delete()
# User.query.filter_by(id=9).delete()
# db.session.commit()
#
# Raw SQL approach
#db.session.execute(r'delete from user where id = 7')
# db.session.commit()
# =============================================================================
# Update rows
# =============================================================================
# ORM approach
# User.query.filter_by(id=3).update({'name':'张三'})
# User.query.filter_by(id=4).update({'name':'李四'})
# db.session.commit()
#
# Raw SQL approach
#db.session.execute(r'update user set name="李四" where id= 4')
#db.session.execute(r'update user set name="王二" where id= 5')
# db.session.commit()
#
#userlist1 = User.query.order_by('id').all()
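# Added note (sketch, older SQLAlchemy string API): raw SQL can take bound
# parameters instead of inlined literals, e.g.
# db.session.execute('select * from eric_day where week = :wk', {'wk': 9})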
| [
"[email protected]"
] | |
211f00020a05944b181487ed9873e302b77af690 | 818afe5e04bcba8b8dfcd37c17a797d5a677725d | /baselines/cifar/data_utils.py | 260fe8f7100e8b11845eba45f58022f4ef5c8180 | [
"Apache-2.0"
] | permissive | nizamphoenix/uncertainty-baselines | 01614eea136603def0dc2a942c83b6fb1df589f2 | 0a7fbbac463788533cc5f26109e616971573dd2e | refs/heads/master | 2023-01-31T16:01:18.397839 | 2020-12-14T22:19:15 | 2020-12-14T22:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,699 | py | # coding=utf-8
# Copyright 2020 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data utilities for CIFAR-10 and CIFAR-100."""
import functools
from absl import logging
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
import augment_utils # local file import
tfd = tfp.distributions
def normalize_convert_image(input_image, dtype):
input_image = tf.image.convert_image_dtype(input_image, dtype)
mean = tf.constant([0.4914, 0.4822, 0.4465])
std = tf.constant([0.2023, 0.1994, 0.2010])
return (input_image - mean) / std
def load_dataset(split,
batch_size,
name,
use_bfloat16,
normalize=True,
drop_remainder=True,
proportion=1.0,
validation_set=False,
validation_proportion=0.05,
aug_params=None):
"""Loads CIFAR dataset for training or testing.
Args:
split: tfds.Split.
batch_size: The global batch size to use.
name: A string indicates whether it is cifar10 or cifar100.
use_bfloat16: data type, bfloat16 precision or float32.
normalize: Whether to apply mean-std normalization on features.
drop_remainder: bool.
proportion: float, the proportion of dataset to be used.
validation_set: bool, whether to split a validation set from training data.
validation_proportion: float, the proportion of training dataset to be used
as the validation split, if validation_set is set to True.
aug_params: dict, data augmentation hyper parameters.
Returns:
Input function which returns a locally-sharded dataset batch.
"""
if proportion < 0. or proportion > 1.:
raise ValueError('proportion needs to lie in the range [0, 1]')
if validation_proportion < 0. or validation_proportion > 1.:
raise ValueError('validation_proportion needs to lie in the range [0, 1]')
if use_bfloat16:
dtype = tf.bfloat16
else:
dtype = tf.float32
ds_info = tfds.builder(name).info
image_shape = ds_info.features['image'].shape
dataset_size = ds_info.splits['train'].num_examples
num_classes = ds_info.features['label'].num_classes
if aug_params is None:
aug_params = {}
adaptive_mixup = aug_params.get('adaptive_mixup', False)
random_augment = aug_params.get('random_augment', False)
mixup_alpha = aug_params.get('mixup_alpha', 0)
ensemble_size = aug_params.get('ensemble_size', 1)
label_smoothing = aug_params.get('label_smoothing', 0.)
if adaptive_mixup and 'mixup_coeff' not in aug_params:
# Hard target in the first epoch!
aug_params['mixup_coeff'] = tf.ones([ensemble_size, num_classes])
if mixup_alpha > 0 or label_smoothing > 0:
onehot = True
else:
onehot = False
def preprocess(image, label):
"""Image preprocessing function."""
if split == tfds.Split.TRAIN:
image = tf.image.resize_with_crop_or_pad(
image, image_shape[0] + 4, image_shape[1] + 4)
image = tf.image.random_crop(image, image_shape)
image = tf.image.random_flip_left_right(image)
# Only random augment for now.
if random_augment:
count = aug_params['aug_count']
augmenter = augment_utils.RandAugment()
augmented = [augmenter.distort(image) for _ in range(count)]
image = tf.stack(augmented)
if split == tfds.Split.TRAIN and aug_params['augmix']:
augmenter = augment_utils.RandAugment()
image = _augmix(image, aug_params, augmenter, dtype)
elif normalize:
image = normalize_convert_image(image, dtype)
if split == tfds.Split.TRAIN and onehot:
label = tf.cast(label, tf.int32)
label = tf.one_hot(label, num_classes)
else:
label = tf.cast(label, dtype)
return image, label
if proportion == 1.0:
if validation_set:
new_name = '{}:3.*.*'.format(name)
if split == 'validation':
new_split = 'train[{}%:]'.format(
int(100 * (1. - validation_proportion)))
dataset = tfds.load(new_name, split=new_split, as_supervised=True)
elif split == tfds.Split.TRAIN:
new_split = 'train[:{}%]'.format(
int(100 * (1. - validation_proportion)))
        dataset = tfds.load(new_name, split=new_split, as_supervised=True)
# split == tfds.Split.TEST case
else:
dataset = tfds.load(name, split=split, as_supervised=True)
else:
dataset = tfds.load(name, split=split, as_supervised=True)
else:
logging.warning(
'Subset of training dataset is being used without a validation set.')
new_name = '{}:3.*.*'.format(name)
if split == tfds.Split.TRAIN:
new_split = 'train[:{}%]'.format(int(100 * proportion))
else:
new_split = 'test[:{}%]'.format(int(100 * proportion))
dataset = tfds.load(new_name, split=new_split, as_supervised=True)
if split == tfds.Split.TRAIN:
dataset = dataset.shuffle(buffer_size=dataset_size).repeat()
dataset = dataset.map(preprocess,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
if mixup_alpha > 0 and split == tfds.Split.TRAIN:
if adaptive_mixup:
dataset = dataset.map(
functools.partial(adaptive_mixup_aug, batch_size, aug_params),
num_parallel_calls=8)
else:
dataset = dataset.map(
functools.partial(mixup, batch_size, aug_params),
num_parallel_calls=8)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
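# Illustrative call (hypothetical values):
#   train_dataset = load_dataset(tfds.Split.TRAIN, batch_size=128,
#                                name='cifar10', use_bfloat16=False)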
def augment_and_mix(image, depth, width, prob_coeff, augmenter, dtype):
"""Apply mixture of augmentations to image."""
mix_weight = tf.squeeze(tfd.Beta([prob_coeff], [prob_coeff]).sample([1]))
if width > 1:
branch_weights = tf.squeeze(tfd.Dirichlet([prob_coeff] * width).sample([1]))
else:
branch_weights = tf.constant([1.])
if depth < 0:
depth = tf.random.uniform([width],
minval=1,
maxval=4,
dtype=tf.dtypes.int32)
else:
depth = tf.constant([depth] * width)
mix = tf.cast(tf.zeros_like(image), tf.float32)
for i in tf.range(width):
branch_img = tf.identity(image)
for _ in tf.range(depth[i]):
branch_img = augmenter.distort(branch_img)
branch_img = normalize_convert_image(branch_img, dtype)
mix += branch_weights[i] * branch_img
return mix_weight * mix + (
1 - mix_weight) * normalize_convert_image(image, dtype)
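# Added note: this follows the AugMix recipe (Hendrycks et al., ICLR 2020):
# sample `width` augmentation chains, combine them with Dirichlet weights,
# then interpolate with the clean image using a Beta-sampled mixing weight.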
def _augmix(image, params, augmenter, dtype):
"""Apply augmix augmentation to image."""
depth = params['augmix_depth']
width = params['augmix_width']
prob_coeff = params['augmix_prob_coeff']
count = params['aug_count']
augmented = [
augment_and_mix(image, depth, width, prob_coeff, augmenter, dtype)
for _ in range(count)
]
image = normalize_convert_image(image, dtype)
return tf.stack([image] + augmented, 0)
def mixup(batch_size, aug_params, images, labels):
"""Applies Mixup regularization to a batch of images and labels.
[1] Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz
Mixup: Beyond Empirical Risk Minimization.
ICLR'18, https://arxiv.org/abs/1710.09412
Arguments:
batch_size: The input batch size for images and labels.
aug_params: Dict of data augmentation hyper parameters.
images: A batch of images of shape [batch_size, ...]
labels: A batch of labels of shape [batch_size, num_classes]
Returns:
A tuple of (images, labels) with the same dimensions as the input with
Mixup regularization applied.
"""
augmix = aug_params.get('augmix', False)
alpha = aug_params.get('mixup_alpha', 0.)
aug_count = aug_params.get('aug_count', 3)
# 4 is hard-coding to aug_count=3. Fix this later!
if augmix:
mix_weight = tfd.Beta(alpha, alpha).sample([batch_size, aug_count + 1, 1])
else:
mix_weight = tfd.Beta(alpha, alpha).sample([batch_size, 1])
mix_weight = tf.maximum(mix_weight, 1. - mix_weight)
if augmix:
images_mix_weight = tf.reshape(mix_weight,
[batch_size, aug_count + 1, 1, 1, 1])
else:
images_mix_weight = tf.reshape(mix_weight, [batch_size, 1, 1, 1])
# Mixup on a single batch is implemented by taking a weighted sum with the
# same batch in reverse.
images_mix = (
images * images_mix_weight + images[::-1] * (1. - images_mix_weight))
if augmix:
labels = tf.reshape(
tf.tile(labels, [1, aug_count + 1]), [batch_size, aug_count + 1, -1])
labels_mix = labels * mix_weight + labels[::-1] * (1. - mix_weight)
labels_mix = tf.reshape(tf.transpose(
labels_mix, [1, 0, 2]), [batch_size * (aug_count + 1), -1])
else:
labels_mix = labels * mix_weight + labels[::-1] * (1. - mix_weight)
return images_mix, labels_mix
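# Illustrative use with tf.data (mirrors the call in load_dataset above),
# assuming aug_params = {'mixup_alpha': 0.2} and one-hot labels:
#   dataset = dataset.map(functools.partial(mixup, batch_size, aug_params))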
def adaptive_mixup_aug(batch_size, aug_params, images, labels):
"""Applies Confidence Adjusted Mixup (CAMixup) regularization.
[1] Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz
Mixup: Beyond Empirical Risk Minimization.
ICLR'18, https://arxiv.org/abs/1710.09412
Arguments:
batch_size: The input batch size for images and labels.
aug_params: Dict of data augmentation hyper parameters.
images: A batch of images of shape [batch_size, ...]
labels: A batch of labels of shape [batch_size, num_classes]
Returns:
A tuple of (images, labels) with the same dimensions as the input with
Mixup regularization applied.
"""
augmix = aug_params['augmix']
ensemble_size = aug_params['ensemble_size']
mixup_coeff = aug_params['mixup_coeff']
scalar_labels = tf.argmax(labels, axis=1)
alpha = tf.gather(mixup_coeff, scalar_labels, axis=-1) # 4 x Batch_size
# Need to filter out elements in alpha which equal to 0.
greater_zero_indicator = tf.cast(alpha > 0, alpha.dtype)
less_one_indicator = tf.cast(alpha < 1, alpha.dtype)
valid_alpha_indicator = tf.cast(
greater_zero_indicator * less_one_indicator, tf.bool)
sampled_alpha = tf.where(valid_alpha_indicator, alpha, 0.1)
mix_weight = tfd.Beta(sampled_alpha, sampled_alpha).sample()
mix_weight = tf.where(valid_alpha_indicator, mix_weight, alpha)
mix_weight = tf.reshape(mix_weight, [ensemble_size * batch_size, 1])
mix_weight = tf.clip_by_value(mix_weight, 0, 1)
mix_weight = tf.maximum(mix_weight, 1. - mix_weight)
images_mix_weight = tf.reshape(mix_weight,
[ensemble_size * batch_size, 1, 1, 1])
# Mixup on a single batch is implemented by taking a weighted sum with the
# same batch in reverse.
if augmix:
images_shape = tf.shape(images)
images = tf.reshape(tf.transpose(
images, [1, 0, 2, 3, 4]), [-1, images_shape[2],
images_shape[3], images_shape[4]])
else:
images = tf.tile(images, [ensemble_size, 1, 1, 1])
labels = tf.tile(labels, [ensemble_size, 1])
images_mix = (
images * images_mix_weight + images[::-1] * (1. - images_mix_weight))
labels_mix = labels * mix_weight + labels[::-1] * (1. - mix_weight)
return images_mix, labels_mix
| [
"[email protected]"
] | |
341214ce0a249bddd010f09c10ca7e03d99e3426 | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-core/Examples/Scripts/wmEnable.py | df5e11a37bc3d66f12b37c05d46dbc5fd132e7d2 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | #!/usr/bin/pythonw
"""
This is an evil undocumented SPI hack that shows how to enable GUI operation
from a console application.
BUNDLES ARE RECOMMENDED, USE THIS AT YOUR OWN RISK!!
"""
import os
import sys
import objc
from Foundation import *
def S(*args):
return b"".join(args)
OSErr = objc._C_SHT
OUTPSN = b"o^{ProcessSerialNumber=LL}"
INPSN = b"n^{ProcessSerialNumber=LL}"
FUNCTIONS = [
# These two are public API
("GetCurrentProcess", S(OSErr, OUTPSN)),
("SetFrontProcess", S(OSErr, INPSN)),
# This is undocumented SPI
("CPSSetProcessName", S(OSErr, INPSN, objc._C_CHARPTR)),
("CPSEnableForegroundOperation", S(OSErr, INPSN)),
]
def WMEnable(name="Python"):
if not isinstance(name, bytes):
name = name.encode("utf8")
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(
objc.pathForFramework(
"/System/Library/Frameworks/ApplicationServices.framework"
)
)
if bndl is None:
print >>sys.stderr, "ApplicationServices missing"
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print >>sys.stderr, "Missing", fn
return False
err, psn = d["GetCurrentProcess"](None)
if err:
print >>sys.stderr, "GetCurrentProcess", (err, psn)
return False
err = d["CPSSetProcessName"](psn, name)
if err:
print >>sys.stderr, "CPSSetProcessName", (err, psn)
return False
err = d["CPSEnableForegroundOperation"](psn)
if err:
print >>sys.stderr, "CPSEnableForegroundOperation", (err, psn)
return False
err = d["SetFrontProcess"](psn)
if err:
print >>sys.stderr, "SetFrontProcess", (err, psn)
return False
return True
class AppDelegate(NSObject):
def applicationDidFinishLaunching_(self, sender):
rval = AppKit.NSRunAlertPanel("WM Enabled", "WM was enabled!", None, None, None)
AppKit.NSApp().terminate_(self)
if __name__ == "__main__":
import sys
if WMEnable(os.path.basename(os.path.splitext(sys.argv[0])[0])):
import AppKit
app = AppKit.NSApplication.sharedApplication()
delegate = AppDelegate.alloc().init()
app.setDelegate_(delegate)
app.run()
else:
print("WM was not enabled")
| [
"[email protected]"
] | |
23431939ada901e854bbd6ac06687c0c52e512f9 | 23a3c76882589d302b614da5f4be0fc626b4f3cd | /python_modules/dagster/dagster/core/definitions/trigger.py | ac2193f821092157e9e91f5367bb6b2bc68ba5d4 | [
"Apache-2.0"
] | permissive | DavidKatz-il/dagster | 3641d04d387cdbe5535ae4f9726ce7dc1981a8c3 | 7c6d16eb8b3610a21020ecb479101db622d1535f | refs/heads/master | 2022-12-20T13:08:36.462058 | 2020-09-14T18:12:12 | 2020-09-14T22:43:26 | 264,703,873 | 0 | 0 | Apache-2.0 | 2020-06-16T09:49:00 | 2020-05-17T15:56:57 | Python | UTF-8 | Python | false | false | 4,312 | py | from collections import namedtuple
from dagster import check
from dagster.core.instance import DagsterInstance
from dagster.utils.backcompat import experimental_class_warning
from .mode import DEFAULT_MODE_NAME
class TriggeredExecutionContext(namedtuple("TriggeredExecutionContext", "instance")):
"""Trigger-specific execution context.
An instance of this class is made available as the first argument to the
TriggeredExecutionDefinition execution_params_fn
Attributes:
instance (DagsterInstance): The instance configured to run the triggered execution
"""
def __new__(
cls, instance,
):
experimental_class_warning("TriggeredExecutionContext")
return super(TriggeredExecutionContext, cls).__new__(
cls, check.inst_param(instance, "instance", DagsterInstance),
)
class TriggeredExecutionDefinition(object):
"""Define a pipeline execution that responds to a trigger
Args:
name (str): The name of this triggered execution to create.
pipeline_name (str): The name of the pipeline to execute when the trigger fires.
run_config_fn (Callable[[TriggeredExecutionContext], [Dict]]): A function that takes a
TriggeredExecutionContext object and returns the environment configuration that
parameterizes this execution, as a dict.
tags_fn (Optional[Callable[[TriggeredExecutionContext], Optional[Dict[str, str]]]]): A
function that generates tags to attach to the triggered execution. Takes a
:py:class:`~dagster.TriggeredExecutionContext` and returns a dictionary of tags (string
key-value pairs).
should_execute_fn (Optional[Callable[[TriggeredExecutionContext], bool]]): A function that
runs at trigger time to determine whether a pipeline execution should be initiated or
skipped. Takes a :py:class:`~dagster.TriggeredExecutionContext` and returns a boolean
(``True`` if a pipeline run should be execute). Defaults to a function that always
returns ``True``.
mode (Optional[str]): The mode to apply when executing this pipeline. (default: 'default')
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the trigger fires. e.g. ``['*some_solid+', 'other_solid']``
"""
__slots__ = [
"_name",
"_pipeline_name",
"_tags_fn",
"_run_config_fn",
"_should_execute_fn",
"_mode",
"_solid_selection",
]
def __init__(
self,
name,
pipeline_name,
run_config_fn=None,
tags_fn=None,
should_execute_fn=None,
mode="default",
solid_selection=None,
):
experimental_class_warning("TriggeredExecutionDefinition")
self._name = check.str_param(name, "name")
self._pipeline_name = check.str_param(pipeline_name, "pipeline_name")
self._run_config_fn = check.opt_callable_param(
run_config_fn, "run_config_fn", lambda _context: {}
)
self._tags_fn = check.opt_callable_param(tags_fn, "tags_fn", lambda _context: {})
self._should_execute_fn = check.opt_callable_param(
should_execute_fn, "should_execute_fn", lambda _context: True
)
self._mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME)
self._solid_selection = check.opt_nullable_list_param(
solid_selection, "solid_selection", of_type=str
)
@property
def pipeline_name(self):
return self._pipeline_name
@property
def solid_selection(self):
return self._solid_selection
@property
def name(self):
return self._name
@property
def mode(self):
return self._mode
def get_run_config(self, context):
check.inst_param(context, "context", TriggeredExecutionContext)
return self._run_config_fn(context)
def get_tags(self, context):
check.inst_param(context, "context", TriggeredExecutionContext)
return self._tags_fn(context)
def should_execute(self, context):
check.inst_param(context, "context", TriggeredExecutionContext)
return self._should_execute_fn(context)
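# Illustrative construction (hypothetical names and config):
# nightly = TriggeredExecutionDefinition(
#     name="nightly_run",
#     pipeline_name="my_pipeline",
#     run_config_fn=lambda context: {},
# )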
| [
"[email protected]"
] | |
8344d1a9044d83a7d0867f09887cb1d7af8c0729 | 3259ffe73a1b2f1a17f0cf0512452d47f47f441d | /Leet Code/268_missing_number.py | f3cae5afa7eb9cf57fddd9e5bb12c9d9ea15054c | [
"MIT"
] | permissive | aayushmaru18/Competitive-Programming | a9160509afe32ee3eced0b7d830c33d62ba6f146 | 0ef237a140901005371a792eea4676b5386c7c50 | refs/heads/master | 2023-05-04T22:03:01.224426 | 2021-06-01T08:37:19 | 2021-06-01T08:37:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | class Solution:
def missingNumber(self, nums: List[int]) -> int:
        # Note: LeetCode's runtime pre-imports `List` from typing.
        n = len(nums)
        s = n * (n + 1) // 2  # expected sum of 0..n (Gauss formula)
        for i in nums:
            s -= i            # subtract each value that is present
        return s              # what remains is the missing number
| [
"[email protected]"
] | |
ffc3e0a708efdb334677d8fcea0d1a1dc4ef2f87 | 81c8beba79c93c50df57ae9654ed23a6b5a1546f | /more/highrest/model.py | 539ab0b21c09af56c5c1161765d2bf3524b4d785 | [] | no_license | morepath/more.highrest | d80a0f3813b246ce636e63b3bf62954ac899ee2f | c15b700b647cd59f4a4dc8bb422e8eb7f9574c4d | refs/heads/master | 2021-01-23T03:21:27.649821 | 2017-03-24T16:41:56 | 2017-03-24T16:41:56 | 86,072,014 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | class CollectionBase(object):
def clone(self, offset, limit, *args, **kw):
raise NotImplementedError()
def add(self, data):
raise NotImplementedError()
def query(self):
raise NotImplementedError()
@staticmethod
def load(request):
raise NotImplementedError()
def count(self):
raise NotImplementedError()
def previous(self):
if self.offset == 0:
return None
offset = self.offset - self.limit
if offset < 0:
offset = 0
return self.clone(offset, self.limit)
def next(self):
if self.offset + self.limit >= self.count():
return None
offset = self.offset + self.limit
return self.clone(offset, self.limit)
class ItemBase(object):
def update(self, data):
raise NotImplementedError()
def remove(self):
raise NotImplementedError()
| [
"[email protected]"
] | |
0603a3669f5e124d892ba55d02f5e796c270385f | e46f56cc1ffa52dee7da4efc718a09405a323a4e | /COT/commands/tests/test_edit_hardware.py | 0f68e914fafb800190f595e04ffbdf89335ad945 | [
"MIT"
] | permissive | harrisonfeng/cot | b8f8a7610c73c3154f7428c576b29d9c3795a97c | 532a6263c5b0462aa290a2852d27317fcc76d576 | refs/heads/master | 2020-12-31T04:15:57.133565 | 2017-04-03T18:27:12 | 2017-04-03T18:27:12 | 56,671,341 | 0 | 0 | null | 2016-04-20T08:51:56 | 2016-04-20T08:51:55 | null | UTF-8 | Python | false | false | 86,299 | py | #!/usr/bin/env python
#
# edit_hardware.py - test cases for the COTEditHardware class
#
# December 2014, Glenn F. Matthews
# Copyright (c) 2013-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Unit test cases for the COT.edit_hardware.COTEditHardware class."""
import re
from COT.commands.tests.command_testcase import CommandTestCase
from COT.ui import UI
from COT.commands.edit_hardware import COTEditHardware
from COT.data_validation import InvalidInputError
from COT.platforms import IOSv, IOSXRv
class TestCOTEditHardware(CommandTestCase):
"""Test the COTEditHardware class."""
NEW_HW_FROM_SCRATCH = {
'levelname': 'NOTICE',
'msg': "No existing items.*Will create new.*from scratch",
}
MEMORY_UNIT_GUESS = {
'levelname': 'WARNING',
'msg': "Memory units not specified, guessing",
}
NO_ITEMS_NO_WORK = {
'levelname': 'WARNING',
'msg': "No items.*found. Nothing to do.",
}
REMOVING_NETWORKSECTION = {
'levelname': "NOTICE",
'msg': "removing NetworkSection",
}
GENERIC_NETWORK = {
'levelname': "WARNING",
'msg': "No network names specified, but NICs must be mapped.*",
'args': ('VM Network',),
}
@staticmethod
def removing_network_message(name=None):
"""Warning log message for deleting a network entry.
Args:
name (str): Name of network being deleted. Defaults to 'VM Network'.
Returns:
dict: kwargs suitable for passing into :meth:`assertLogged`
"""
if not name:
name = "VM Network"
return {
'levelname': "NOTICE",
'msg': "Removing unused network %s",
'args': [name],
}
command_class = COTEditHardware
def test_not_ready_with_no_args(self):
"""Test ready_to_run() behavior."""
self.command.package = self.input_ovf
ready, reason = self.command.ready_to_run()
self.assertEqual(ready, False)
self.assertTrue(re.search("No work requested", reason))
self.assertRaises(InvalidInputError, self.command.run)
def test_valid_args(self):
"""Verify that various valid args are accepted and stored."""
self.command.package = self.input_ovf
self.command.cpus = "1"
self.assertEqual(self.command.cpus, 1)
self.command.memory = "1GB"
self.assertEqual(self.command.memory, 1024)
self.command.memory = "2g"
self.assertEqual(self.command.memory, 2048)
self.command.memory = "256M"
self.assertEqual(self.command.memory, 256)
self.command.memory = "1024"
self.assertLogged(**self.MEMORY_UNIT_GUESS)
self.assertEqual(self.command.memory, 1024)
self.command.nics = 1
self.assertEqual(self.command.nics, 1)
self.command.serial_ports = 1
self.assertEqual(self.command.serial_ports, 1)
def test_invalid_always_args(self):
"""Verify that various values are always invalid."""
# pylint: disable=redefined-variable-type
self.command.package = self.input_ovf
with self.assertRaises(InvalidInputError):
self.command.cpus = 0
with self.assertRaises(InvalidInputError):
self.command.cpus = "a"
with self.assertRaises(InvalidInputError):
self.command.memory = 0
with self.assertRaises(InvalidInputError):
self.command.memory = "GB"
with self.assertRaises(InvalidInputError):
self.command.nics = -1
with self.assertRaises(InvalidInputError):
self.command.nics = "b"
with self.assertRaises(InvalidInputError):
self.command.serial_ports = -1
with self.assertRaises(InvalidInputError):
self.command.serial_ports = "c"
def test_valid_by_platform(self):
"""Verify that some input values' validity depends on platform."""
self.command.package = self.input_ovf
self.command.ui.default_confirm_response = False
# IOSv only supports 1 vCPU and up to 3 GB of RAM
self.set_vm_platform(IOSv)
with self.assertRaises(InvalidInputError):
self.command.cpus = 2
with self.assertRaises(InvalidInputError):
self.command.memory = "4GB"
# ...but IOSXRv supports up to 8 CPUs and 3-8 GB of RAM
self.set_vm_platform(IOSXRv)
self.command.cpus = 2
self.command.cpus = 8
with self.assertRaises(InvalidInputError):
self.command.cpus = 9
self.command.memory = "4"
self.assertLogged(**self.MEMORY_UNIT_GUESS)
self.command.memory = "8GB"
with self.assertRaises(InvalidInputError):
self.command.memory = "9GB"
def test_set_system_type_single(self):
"""Set the VirtualSystemType to a single value."""
self.command.package = self.input_ovf
self.command.virtual_system_type = ['vmx-09']
self.command.run()
self.command.finished()
self.check_diff("""
<vssd:VirtualSystemIdentifier>test</vssd:VirtualSystemIdentifier>
- <vssd:VirtualSystemType>vmx-07 vmx-08</vssd:VirtualSystemType>
+ <vssd:VirtualSystemType>vmx-09</vssd:VirtualSystemType>
</ovf:System>
""")
def test_set_system_type_list(self):
"""Set the VirtualSystemType to a list of values."""
self.command.package = self.input_ovf
self.command.virtual_system_type = \
['vmx-07', 'vmx-08', 'vmx-09', 'Cisco:Internal:VMCloud-01']
# 'profiles' will be ignored in this case,
# as VirtualSystemType is not filtered by profile
self.command.profiles = ['2CPU-2GB-1NIC']
self.command.run()
# TODO - catch warning logger message that should be generated
# due to profiles being ignored.
self.command.finished()
self.check_diff("""
<vssd:VirtualSystemIdentifier>test</vssd:VirtualSystemIdentifier>
- <vssd:VirtualSystemType>vmx-07 vmx-08</vssd:VirtualSystemType>
+ <vssd:VirtualSystemType>vmx-07 vmx-08 vmx-09 \
Cisco:Internal:VMCloud-01</vssd:VirtualSystemType>
</ovf:System>
""")
def test_set_system_type_no_existing(self):
"""Add a VirtualSystemType to an OVF that doesn't have any."""
self.command.package = self.minimal_ovf
self.command.virtual_system_type = ['vmx-07', 'vmx-08']
self.command.run()
self.command.finished()
self.check_diff(file1=self.minimal_ovf,
expected="""
<?xml version='1.0' encoding='utf-8'?>
-<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_VirtualSystemSettingData">
<ovf:References />
...
<ovf:Info />
+ <ovf:System>
+ <vssd:ElementName>Virtual System Type</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemType>vmx-07 vmx-08</vssd:VirtualSystemType>
+ </ovf:System>
</ovf:VirtualHardwareSection>
""")
def test_set_cpus_one_profile(self):
"""Change the number of CPUs under a specific profile."""
self.command.package = self.input_ovf
self.command.cpus = 8
self.command.profiles = ['2CPU-2GB-1NIC']
self.command.run()
self.command.finished()
self.check_diff("""
<rasd:Description>Number of Virtual CPUs</rasd:Description>
- <rasd:ElementName>2 virtual CPU(s)</rasd:ElementName>
+ <rasd:ElementName>8 virtual CPU(s)</rasd:ElementName>
<rasd:InstanceID>1</rasd:InstanceID>
<rasd:ResourceType>3</rasd:ResourceType>
- <rasd:VirtualQuantity>2</rasd:VirtualQuantity>
+ <rasd:VirtualQuantity>8</rasd:VirtualQuantity>
<vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>
""")
def test_set_cpus_merge_profiles(self):
"""Change # CPUs under one profile to match another profile."""
self.command.package = self.input_ovf
self.command.cpus = 4
self.command.profiles = ['2CPU-2GB-1NIC']
self.command.run()
self.command.finished()
self.check_diff("""
</ovf:Item>
- <ovf:Item ovf:configuration="2CPU-2GB-1NIC">
- <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
- <rasd:Description>Number of Virtual CPUs</rasd:Description>
- <rasd:ElementName>2 virtual CPU(s)</rasd:ElementName>
- <rasd:InstanceID>1</rasd:InstanceID>
- <rasd:ResourceType>3</rasd:ResourceType>
- <rasd:VirtualQuantity>2</rasd:VirtualQuantity>
- <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>
- </ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <ovf:Item ovf:configuration="2CPU-2GB-1NIC 4CPU-4GB-3NIC">
<rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
""")
def test_set_cpus_all_profiles(self):
"""Change value under all profiles, merging a group of Items."""
self.command.package = self.input_ovf
self.command.cpus = 1
self.command.run()
self.command.finished()
self.check_diff("""
</ovf:Item>
- <ovf:Item ovf:configuration="2CPU-2GB-1NIC">
- <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
- <rasd:Description>Number of Virtual CPUs</rasd:Description>
- <rasd:ElementName>2 virtual CPU(s)</rasd:ElementName>
- <rasd:InstanceID>1</rasd:InstanceID>
- <rasd:ResourceType>3</rasd:ResourceType>
- <rasd:VirtualQuantity>2</rasd:VirtualQuantity>
- <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>
- </ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
- <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
- <rasd:Description>Number of Virtual CPUs</rasd:Description>
- <rasd:ElementName>4 virtual CPU(s)</rasd:ElementName>
- <rasd:InstanceID>1</rasd:InstanceID>
- <rasd:ResourceType>3</rasd:ResourceType>
- <rasd:VirtualQuantity>4</rasd:VirtualQuantity>
- <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>
- </ovf:Item>
<ovf:Item>
""")
def test_set_cpus_no_existing(self):
"""Create a CPU definition in an OVF that doesn't have one."""
self.command.package = self.minimal_ovf
self.command.cpus = 1
self.command.run()
self.assertLogged(**self.NEW_HW_FROM_SCRATCH)
self.command.finished()
self.check_diff(file1=self.minimal_ovf,
expected="""
<?xml version='1.0' encoding='utf-8'?>
-<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_ResourceAllocationSettingData">
<ovf:References />
...
<ovf:Info />
+ <ovf:Item>
+ <rasd:ElementName>cpu</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""")
def test_set_memory_one_profile(self):
"""Set memory allocation under one profile."""
self.command.package = self.input_ovf
self.command.memory = 3072
self.assertLogged(**self.MEMORY_UNIT_GUESS)
self.command.profiles = ['2CPU-2GB-1NIC']
self.command.run()
self.command.finished()
self.check_diff("""
<rasd:Description>Memory Size</rasd:Description>
- <rasd:ElementName>2048MB of memory</rasd:ElementName>
+ <rasd:ElementName>3072MB of memory</rasd:ElementName>
<rasd:InstanceID>2</rasd:InstanceID>
<rasd:ResourceType>4</rasd:ResourceType>
- <rasd:VirtualQuantity>2048</rasd:VirtualQuantity>
+ <rasd:VirtualQuantity>3072</rasd:VirtualQuantity>
</ovf:Item>
""")
def test_set_memory_all_profiles(self):
"""Set memory allocation under one profile."""
self.command.package = self.input_ovf
self.command.memory = "3072M"
self.command.run()
self.command.finished()
self.check_diff("""
<rasd:Description>Memory Size</rasd:Description>
- <rasd:ElementName>1024MB of memory</rasd:ElementName>
+ <rasd:ElementName>3072MB of memory</rasd:ElementName>
<rasd:InstanceID>2</rasd:InstanceID>
<rasd:ResourceType>4</rasd:ResourceType>
- <rasd:VirtualQuantity>1024</rasd:VirtualQuantity>
- </ovf:Item>
- <ovf:Item ovf:configuration="2CPU-2GB-1NIC">
- <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
- <rasd:Description>Memory Size</rasd:Description>
- <rasd:ElementName>2048MB of memory</rasd:ElementName>
- <rasd:InstanceID>2</rasd:InstanceID>
- <rasd:ResourceType>4</rasd:ResourceType>
- <rasd:VirtualQuantity>2048</rasd:VirtualQuantity>
- </ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
- <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
- <rasd:Description>Memory Size</rasd:Description>
- <rasd:ElementName>4096MB of memory</rasd:ElementName>
- <rasd:InstanceID>2</rasd:InstanceID>
- <rasd:ResourceType>4</rasd:ResourceType>
- <rasd:VirtualQuantity>4096</rasd:VirtualQuantity>
+ <rasd:VirtualQuantity>3072</rasd:VirtualQuantity>
</ovf:Item>
""")
def test_set_memory_no_existing(self):
"""Create a RAM definition in an OVF that doesn't have one."""
self.command.package = self.minimal_ovf
self.command.memory = "4GB"
self.command.run()
self.assertLogged(**self.NEW_HW_FROM_SCRATCH)
self.command.finished()
self.check_diff(file1=self.minimal_ovf,
expected="""
<?xml version='1.0' encoding='utf-8'?>
-<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_ResourceAllocationSettingData">
<ovf:References />
...
<ovf:Info />
+ <ovf:Item>
+ <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+ <rasd:ElementName>memory</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:VirtualQuantity>4096</rasd:VirtualQuantity>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""")
def test_set_nic_type_one_profile(self):
"""Set NIC hardware type under a single profile."""
self.command.package = self.input_ovf
self.command.profiles = ['4CPU-4GB-3NIC']
self.command.nic_type = "E1000"
self.command.run()
self.command.finished()
# This requires cloning the "default" NIC under instance 11
# to create a profile-specific version of this NIC
self.check_diff("""
<ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:AddressOnParent>11</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>VM Network</rasd:Connection>
+ <rasd:Description>E1000 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:ElementName>GigabitEthernet1</rasd:ElementName>
+ <rasd:InstanceID>11</rasd:InstanceID>
+ <rasd:ResourceSubType>E1000</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
<rasd:AddressOnParent>12</rasd:AddressOnParent>
...
<rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Description>E1000 ethernet adapter on "VM Network"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet2</rasd:ElementName>
<rasd:InstanceID>12</rasd:InstanceID>
- <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceSubType>E1000</rasd:ResourceSubType>
<rasd:ResourceType>10</rasd:ResourceType>
...
<rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Description>E1000 ethernet adapter on "VM Network"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet3</rasd:ElementName>
<rasd:InstanceID>13</rasd:InstanceID>
- <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceSubType>E1000</rasd:ResourceSubType>
<rasd:ResourceType>10</rasd:ResourceType>
""")
def test_set_nic_type_all_profiles(self):
"""Change NIC hardware type under all profiles."""
self.command.package = self.input_ovf
self.command.nic_type = "virtio-net-pci"
self.assertEqual(self.command.nic_type, "virtio")
self.command.run()
self.command.finished()
self.check_diff("""
<rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Description>virtio ethernet adapter on "VM Network"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet1</rasd:ElementName>
<rasd:InstanceID>11</rasd:InstanceID>
- <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceSubType>virtio</rasd:ResourceSubType>
<rasd:ResourceType>10</rasd:ResourceType>
...
<rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Description>virtio ethernet adapter on "VM Network"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet2</rasd:ElementName>
<rasd:InstanceID>12</rasd:InstanceID>
- <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceSubType>virtio</rasd:ResourceSubType>
<rasd:ResourceType>10</rasd:ResourceType>
...
<rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Description>virtio ethernet adapter on "VM Network"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet3</rasd:ElementName>
<rasd:InstanceID>13</rasd:InstanceID>
- <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceSubType>virtio</rasd:ResourceSubType>
<rasd:ResourceType>10</rasd:ResourceType>
""")
def test_set_nic_type_no_existing(self):
"""Set NIC hardware type for an OVF with no NICs (no-op)."""
self.command.package = self.minimal_ovf
self.command.nic_type = "virtio"
self.command.run()
self.assertLogged(**self.NO_ITEMS_NO_WORK)
self.command.finished()
self.check_diff("", file1=self.minimal_ovf)
def test_set_nic_count_add(self):
"""Add additional NICs across all profiles."""
self.command.package = self.input_ovf
self.command.nics = 5
self.command.run()
self.command.finished()
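        # Raising the count under all profiles promotes the formerly
        # profile-specific NICs 2-3 to shared hardware, then appends
        # Ethernet4 and Ethernet5.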
self.check_diff("""
</ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <ovf:Item>
<rasd:AddressOnParent>12</rasd:AddressOnParent>
...
</ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <ovf:Item>
<rasd:AddressOnParent>13</rasd:AddressOnParent>
...
<rasd:InstanceID>13</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>14</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>VM Network</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:ElementName>Ethernet4</rasd:ElementName>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>15</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>VM Network</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:ElementName>Ethernet5</rasd:ElementName>
+ <rasd:InstanceID>15</rasd:InstanceID>
<rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>""")
def test_set_nic_count_add_smart_networks(self):
"""Add additional NICs (and implicitly networks) across all profiles.
In this OVF, each NIC is mapped to a unique network, so COT must be
smart enough to create additional networks as well.
"""
self.command.package = self.csr_ovf
self.command.nics = 6
self.command.run()
self.command.finished()
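        # Each new NIC gets its own auto-created network
        # (GigabitEthernet4 through GigabitEthernet6).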
self.check_diff("""
<ovf:Description>Data network 3</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="GigabitEthernet4">
+ <ovf:Description>Data network 4</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="GigabitEthernet5">
+ <ovf:Description>Data network 5</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="GigabitEthernet6">
+ <ovf:Description>Data network 6</ovf:Description>
</ovf:Network>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>14</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>GigabitEthernet4</rasd:Connection>
+ <rasd:Description>NIC representing GigabitEthernet4</rasd:Description>
+ <rasd:ElementName>GigabitEthernet4</rasd:ElementName>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3 virtio</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>15</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>GigabitEthernet5</rasd:Connection>
+ <rasd:Description>NIC representing GigabitEthernet5</rasd:Description>
+ <rasd:ElementName>GigabitEthernet5</rasd:ElementName>
+ <rasd:InstanceID>15</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3 virtio</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>16</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>GigabitEthernet6</rasd:Connection>
+ <rasd:Description>NIC representing GigabitEthernet6</rasd:Description>
+ <rasd:ElementName>GigabitEthernet6</rasd:ElementName>
+ <rasd:InstanceID>16</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3 virtio</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>""", file1=self.csr_ovf)
def test_set_nic_count_named_nics_and_networks(self):
"""Add more NICs and explicitly named networks across all profiles.
This tests a user-reported issue where COT gets confused because the
base OVF uses the same strings for NIC and network names, but the
desired output OVF does not.
"""
self.command.package = self.csr_ovf
self.command.nics = 4
self.command.nic_names = ['GigabitEthernet{1}']
self.command.nic_networks = ["Alpha", "Beta", "Delta", "Gamma"]
self.command.run()
self.command.finished()
self.assertLogged(**self.removing_network_message('GigabitEthernet1'))
self.assertLogged(**self.removing_network_message('GigabitEthernet2'))
self.assertLogged(**self.removing_network_message('GigabitEthernet3'))
self.check_diff("""
<ovf:Info>The list of logical networks</ovf:Info>
- <ovf:Network ovf:name="GigabitEthernet1">
- <ovf:Description>Data network 1</ovf:Description>
+ <ovf:Network ovf:name="Alpha">
+ <ovf:Description>Alpha</ovf:Description>
</ovf:Network>
- <ovf:Network ovf:name="GigabitEthernet2">
- <ovf:Description>Data network 2</ovf:Description>
+ <ovf:Network ovf:name="Beta">
+ <ovf:Description>Beta</ovf:Description>
</ovf:Network>
- <ovf:Network ovf:name="GigabitEthernet3">
- <ovf:Description>Data network 3</ovf:Description>
+ <ovf:Network ovf:name="Delta">
+ <ovf:Description>Delta</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="Gamma">
+ <ovf:Description>Gamma</ovf:Description>
</ovf:Network>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>GigabitEthernet1</rasd:Connection>
+ <rasd:Connection>Alpha</rasd:Connection>
<rasd:Description>NIC representing GigabitEthernet1</rasd:Description>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>GigabitEthernet2</rasd:Connection>
+ <rasd:Connection>Beta</rasd:Connection>
<rasd:Description>NIC representing GigabitEthernet2</rasd:Description>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>GigabitEthernet3</rasd:Connection>
+ <rasd:Connection>Delta</rasd:Connection>
<rasd:Description>NIC representing GigabitEthernet3</rasd:Description>
...
<rasd:InstanceID>13</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3 virtio</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>14</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>Gamma</rasd:Connection>
+ <rasd:Description>NIC representing GigabitEthernet4</rasd:Description>
+ <rasd:ElementName>GigabitEthernet4</rasd:ElementName>
+ <rasd:InstanceID>14</rasd:InstanceID>
<rasd:ResourceSubType>VMXNET3 virtio</rasd:ResourceSubType>
""", file1=self.csr_ovf)
def test_set_nic_count_merge_profiles(self):
"""Add NICs that already exist under one profile to another."""
self.command.package = self.input_ovf
self.command.nics = 3
self.command.profiles = ['2CPU-2GB-1NIC']
self.command.run()
self.command.finished()
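        # NICs 2 and 3 already exist under 4CPU-4GB-3NIC, so they are
        # extended to 2CPU-2GB-1NIC rather than recreated.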
self.check_diff("""
</ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <ovf:Item ovf:configuration="2CPU-2GB-1NIC 4CPU-4GB-3NIC">
<rasd:AddressOnParent>12</rasd:AddressOnParent>
...
</ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <ovf:Item ovf:configuration="2CPU-2GB-1NIC 4CPU-4GB-3NIC">
<rasd:AddressOnParent>13</rasd:AddressOnParent>
""")
def test_set_nic_count_create_new_one_profile(self):
"""Create a new NIC under a single profile."""
self.command.package = self.input_ovf
self.command.nics = '4'
self.command.profiles = ['4CPU-4GB-3NIC']
self.command.run()
self.command.finished()
self.check_diff("""
</ovf:Item>
+ <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:AddressOnParent>14</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>VM Network</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:ElementName>Ethernet4</rasd:ElementName>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""")
def test_set_nic_count_create_new_and_new_profile(self):
"""Create new NICs under a new profile. Test for issue #64."""
self.command.package = self.input_ovf
self.command.nics = '4'
self.command.profiles = ['4CPU-4GB-4NIC']
self.command.run()
self.command.finished()
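        # The 4CPU-4GB-4NIC profile is created on the fly; it shares
        # NICs 2-3 with 4CPU-4GB-3NIC and gains a fourth NIC of its own.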
self.check_diff("""
</ovf:Configuration>
+ <ovf:Configuration ovf:id="4CPU-4GB-4NIC">
+ <ovf:Label>4CPU-4GB-4NIC</ovf:Label>
+ <ovf:Description>4CPU-4GB-4NIC</ovf:Description>
+ </ovf:Configuration>
</ovf:DeploymentOptionSection>
...
</ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <ovf:Item ovf:configuration="4CPU-4GB-3NIC 4CPU-4GB-4NIC">
<rasd:AddressOnParent>12</rasd:AddressOnParent>
...
</ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <ovf:Item ovf:configuration="4CPU-4GB-3NIC 4CPU-4GB-4NIC">
<rasd:AddressOnParent>13</rasd:AddressOnParent>
...
<rasd:InstanceID>13</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item ovf:configuration="4CPU-4GB-4NIC">
+ <rasd:AddressOnParent>14</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>VM Network</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:ElementName>Ethernet4</rasd:ElementName>
+ <rasd:InstanceID>14</rasd:InstanceID>
<rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
""")
def test_set_nic_count_create_new_and_split_new_profile(self):
"""Create new NICs under a new profile splitting from unified profile.
Another test for issue #64.
"""
self.command.package = self.csr_ovf
self.command.nics = '4'
self.command.profiles = ['4CPU-4GB-4NIC']
self.command.run()
self.command.finished()
self.check_diff(file1=self.csr_ovf, expected="""
</ovf:Network>
+ <ovf:Network ovf:name="GigabitEthernet4">
+ <ovf:Description>Data network 4</ovf:Description>
+ </ovf:Network>
</ovf:NetworkSection>
...
<ovf:Description>Large hardware profile (requires purchase of DRAM \
upgrade SKU) - 4 vCPUs, 8 GB RAM</ovf:Description>
+ </ovf:Configuration>
+ <ovf:Configuration ovf:id="4CPU-4GB-4NIC">
+ <ovf:Label>4CPU-4GB-4NIC</ovf:Label>
+ <ovf:Description>4CPU-4GB-4NIC</ovf:Description>
</ovf:Configuration>
...
</ovf:Item>
+ <ovf:Item ovf:configuration="4CPU-4GB-4NIC">
+ <rasd:AddressOnParent>14</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>GigabitEthernet4</rasd:Connection>
+ <rasd:Description>NIC representing GigabitEthernet4</rasd:Description>
+ <rasd:ElementName>GigabitEthernet4</rasd:ElementName>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3 virtio</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""")
def test_set_nic_count_delete_nics(self):
"""Set NIC count to a lower value, deleting some NICs."""
self.command.package = self.input_ovf
self.command.nics = 0
self.command.profiles = ['1CPU-1GB-1NIC']
self.command.run()
self.command.finished()
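        # The NIC Item is not removed outright - it is restricted to the
        # profiles that still use it.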
self.check_diff("""
</ovf:Item>
- <ovf:Item>
+ <ovf:Item ovf:configuration="2CPU-2GB-1NIC 4CPU-4GB-3NIC">
<rasd:AddressOnParent>11</rasd:AddressOnParent>
""")
def test_set_nic_count_delete_nics_new_profile(self):
"""Set NIC count to a lower value under a newly created profile."""
self.command.package = self.csr_ovf
self.command.nics = 1
self.command.profiles = ['1CPU-4GB-1NIC']
self.command.run()
self.command.finished()
self.check_diff(file1=self.csr_ovf, expected="""
</ovf:Configuration>
+ <ovf:Configuration ovf:id="1CPU-4GB-1NIC">
+ <ovf:Label>1CPU-4GB-1NIC</ovf:Label>
+ <ovf:Description>1CPU-4GB-1NIC</ovf:Description>
+ </ovf:Configuration>
</ovf:DeploymentOptionSection>
...
</ovf:Item>
- <ovf:Item>
+ <ovf:Item ovf:configuration="1CPU-4GB 2CPU-4GB 4CPU-4GB 4CPU-8GB">
<rasd:AddressOnParent>12</rasd:AddressOnParent>
...
</ovf:Item>
- <ovf:Item>
+ <ovf:Item ovf:configuration="1CPU-4GB 2CPU-4GB 4CPU-4GB 4CPU-8GB">
<rasd:AddressOnParent>13</rasd:AddressOnParent>
""")
def test_set_nic_count_no_existing(self):
"""Create a NIC when nothing pre-exists."""
self.command.package = self.minimal_ovf
self.command.nics = 2
self.command.run()
self.assertLogged(**self.NEW_HW_FROM_SCRATCH)
self.assertLogged(**self.GENERIC_NETWORK)
self.command.finished()
self.check_diff(file1=self.minimal_ovf, expected="""
<?xml version='1.0' encoding='utf-8'?>
-<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_ResourceAllocationSettingData">
<ovf:References />
+ <ovf:NetworkSection>
+ <ovf:Info>Logical networks</ovf:Info>
+ <ovf:Network ovf:name="VM Network">
+ <ovf:Description>VM Network</ovf:Description>
+ </ovf:Network>
+ </ovf:NetworkSection>
<ovf:VirtualSystem ovf:id="x">
...
<ovf:Info />
+ <ovf:Item>
+ <rasd:Connection>VM Network</rasd:Connection>
+ <rasd:ElementName>Ethernet1</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:Connection>VM Network</rasd:Connection>
+ <rasd:ElementName>Ethernet2</rasd:ElementName>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""")
def test_set_nic_count_zero_then_re_add(self):
"""Set NIC count to zero, then recreate the NICs."""
self.command.package = self.v09_ovf
self.command.nics = 0
self.command.run()
self.command.finished()
self.assertLogged(**self.removing_network_message('bridged'))
self.assertLogged(**self.REMOVING_NETWORKSECTION)
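        # Second pass: reopen the stripped OVF and rebuild one NIC (and
        # its network) from scratch.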
self.command.package = self.temp_file
self.command.nics = 1
self.command.run()
self.assertLogged(**self.NEW_HW_FROM_SCRATCH)
self.assertLogged(**self.GENERIC_NETWORK)
self.command.finished()
self.check_diff(file1=self.v09_ovf, expected="""
<ovf:Section xsi:type="ovf:NetworkSection_Type">
- <ovf:Info>The list of logical networks</ovf:Info>
- <ovf:Network ovf:name="bridged">
- <ovf:Description>The bridged network</ovf:Description>
+ <ovf:Info>Logical networks</ovf:Info>
+ <ovf:Network ovf:name="VM Network">
+ <ovf:Description>VM Network</ovf:Description>
</ovf:Network>
...
<ovf:Item>
- <rasd:Caption>ethernet0</rasd:Caption>
- <rasd:Description>PCNet32 ethernet adapter</rasd:Description>
+ <rasd:Caption>Ethernet1</rasd:Caption>
<rasd:InstanceId>8</rasd:InstanceId>
<rasd:ResourceType>10</rasd:ResourceType>
- <rasd:ResourceSubType>PCNet32</rasd:ResourceSubType>
- <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>bridged</rasd:Connection>
- <rasd:AddressOnParent>1</rasd:AddressOnParent>
+ <rasd:Connection>VM Network</rasd:Connection>
</ovf:Item>
""")
def test_set_nic_network_one_profile(self):
"""Create a new network and map a NIC to it under a single profile."""
# Create a new network and map to it under one profile
# This involves splitting the existing NIC into two items
self.command.package = self.input_ovf
self.command.nic_networks = ['UT']
self.command.network_descriptions = ['Unit test network']
self.command.profiles = ['2CPU-2GB-1NIC']
self.command.run()
self.command.finished()
self.check_diff("""
<ovf:Description>VM Network</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="UT">
+ <ovf:Description>Unit test network</ovf:Description>
</ovf:Network>
...
</ovf:Item>
+ <ovf:Item ovf:configuration="2CPU-2GB-1NIC">
+ <rasd:AddressOnParent>11</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>UT</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT"</rasd:Description>
+ <rasd:ElementName>GigabitEthernet1</rasd:ElementName>
+ <rasd:InstanceID>11</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
<ovf:Item ovf:configuration="4CPU-4GB-3NIC">
""")
def test_set_nic_network_all_profiles(self):
"""Test changing NIC network mapping across all profiles."""
self.command.package = self.input_ovf
self.command.nic_networks = ['UT', 'UT', 'UT']
self.command.run()
self.command.finished()
self.assertLogged(**self.removing_network_message())
self.check_diff("""
<ovf:Info>The list of logical networks</ovf:Info>
- <ovf:Network ovf:name="VM Network">
- <ovf:Description>VM Network</ovf:Description>
+ <ovf:Network ovf:name="UT">
+ <ovf:Description>UT</ovf:Description>
</ovf:Network>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT"</rasd:Description>
<rasd:ElementName>GigabitEthernet1</rasd:ElementName>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT"</rasd:Description>
<rasd:ElementName>GigabitEthernet2</rasd:ElementName>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT"</rasd:Description>
<rasd:ElementName>GigabitEthernet3</rasd:ElementName>
""")
def test_set_nic_network_list_expansion(self):
"""Specify fewer networks than NICs to test implicit NIC assignment.
Also specify fewer network descriptions than networks.
Remaining networks get the last description in the list.
Remaining NICs get the last network in the list.
"""
self.command.package = self.input_ovf
self.command.nic_networks = ['UT1', 'UT2']
self.command.network_descriptions = ['First UT']
self.command.run()
self.command.finished()
self.assertLogged(**self.removing_network_message())
self.check_diff("""
<ovf:Info>The list of logical networks</ovf:Info>
- <ovf:Network ovf:name="VM Network">
- <ovf:Description>VM Network</ovf:Description>
+ <ovf:Network ovf:name="UT1">
+ <ovf:Description>First UT</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="UT2">
+ <ovf:Description>First UT</ovf:Description>
</ovf:Network>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT1</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT1"</rasd:Description>
<rasd:ElementName>GigabitEthernet1</rasd:ElementName>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT2</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT2"</rasd:Description>
<rasd:ElementName>GigabitEthernet2</rasd:ElementName>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT2</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT2"</rasd:Description>
<rasd:ElementName>GigabitEthernet3</rasd:ElementName>
""")
def test_set_nic_network_list_pattern(self):
"""Use wildcard expansion to create multiple networks as needed."""
self.command.package = self.input_ovf
self.command.nic_networks = ["UT_{20}_network"]
self.command.network_descriptions = ['First network', '#{2} Network']
self.command.run()
self.command.finished()
self.assertLogged(**self.removing_network_message())
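        # The "{20}" wildcard counts upward per NIC (UT_20/21/22_network);
        # the "#{2}" description pattern likewise counts from 2.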
self.check_diff("""
<ovf:Info>The list of logical networks</ovf:Info>
- <ovf:Network ovf:name="VM Network">
- <ovf:Description>VM Network</ovf:Description>
+ <ovf:Network ovf:name="UT_20_network">
+ <ovf:Description>First network</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="UT_21_network">
+ <ovf:Description>#2 Network</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="UT_22_network">
+ <ovf:Description>#3 Network</ovf:Description>
</ovf:Network>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT_20_network</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT_20_network"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet1</rasd:ElementName>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT_21_network</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT_21_network"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet2</rasd:ElementName>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT_22_network</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT_22_network"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet3</rasd:ElementName>
""")
def test_set_network_description_only(self):
"""Set network descriptions without changing network names."""
self.command.package = self.input_ovf
self.command.network_descriptions = ['Network 1', 'Network 2']
self.command.run()
self.command.finished()
self.check_diff("""
<ovf:Network ovf:name="VM Network">
- <ovf:Description>VM Network</ovf:Description>
+ <ovf:Description>Network 1</ovf:Description>
</ovf:Network>
""")
def test_set_nic_mac_address_single_all_profiles(self):
"""Set a single MAC address on all NICs on all profiles."""
self.command.package = self.input_ovf
self.command.mac_addresses_list = ['10:20:30:40:50:60']
self.command.run()
self.command.finished()
self.check_diff("""
<ovf:Item>
+ <rasd:Address>10:20:30:40:50:60</rasd:Address>
<rasd:AddressOnParent>11</rasd:AddressOnParent>
...
<ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:Address>10:20:30:40:50:60</rasd:Address>
<rasd:AddressOnParent>12</rasd:AddressOnParent>
...
<ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:Address>10:20:30:40:50:60</rasd:Address>
<rasd:AddressOnParent>13</rasd:AddressOnParent>
""")
def test_set_nic_mac_addresses_list_all_profiles(self):
"""Set a sequence of MAC addresses for all profiles."""
self.command.package = self.input_ovf
self.command.mac_addresses_list = \
['10:20:30:40:50:60', '01:02:03:04:05:06', 'ab:cd:ef:00:00:00']
self.command.run()
self.command.finished()
self.check_diff("""
<ovf:Item>
+ <rasd:Address>10:20:30:40:50:60</rasd:Address>
<rasd:AddressOnParent>11</rasd:AddressOnParent>
...
<ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:Address>01:02:03:04:05:06</rasd:Address>
<rasd:AddressOnParent>12</rasd:AddressOnParent>
...
<ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:Address>ab:cd:ef:00:00:00</rasd:Address>
<rasd:AddressOnParent>13</rasd:AddressOnParent>
""")
def test_set_nic_name_list_exact(self):
"""Set a list of names identical in length to the number of NICs."""
self.command.package = self.input_ovf
self.command.nic_names = ['foo', 'bar', 'baz']
self.command.run()
self.command.finished()
self.check_diff("""
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet1</rasd:ElementName>
+ <rasd:ElementName>foo</rasd:ElementName>
<rasd:InstanceID>11</rasd:InstanceID>
...
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet2</rasd:ElementName>
+ <rasd:ElementName>bar</rasd:ElementName>
<rasd:InstanceID>12</rasd:InstanceID>
...
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet3</rasd:ElementName>
+ <rasd:ElementName>baz</rasd:ElementName>
<rasd:InstanceID>13</rasd:InstanceID>
""")
def test_set_nic_name_list_extra(self):
"""Set a list of NIC names that's longer than needed."""
self.command.package = self.input_ovf
self.command.nic_names = ['foo', 'bar', 'baz', 'bat']
self.command.run()
self.assertLogged(levelname="WARNING",
msg="not all %s values were used",
args=('ethernet', 'ElementName', ['bat']))
self.command.finished()
self.check_diff("""
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet1</rasd:ElementName>
+ <rasd:ElementName>foo</rasd:ElementName>
<rasd:InstanceID>11</rasd:InstanceID>
...
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet2</rasd:ElementName>
+ <rasd:ElementName>bar</rasd:ElementName>
<rasd:InstanceID>12</rasd:InstanceID>
...
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet3</rasd:ElementName>
+ <rasd:ElementName>baz</rasd:ElementName>
<rasd:InstanceID>13</rasd:InstanceID>
""")
def test_set_nic_name_list_short(self):
"""Set a list of NIC names that's shorter than needed."""
self.command.package = self.input_ovf
self.command.nic_names = ['foo', 'bar']
self.command.run()
self.command.finished()
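        # With fewer names than NICs, the last name ("bar") is reused for
        # the remaining NICs.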
self.check_diff("""
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet1</rasd:ElementName>
+ <rasd:ElementName>foo</rasd:ElementName>
<rasd:InstanceID>11</rasd:InstanceID>
...
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet2</rasd:ElementName>
+ <rasd:ElementName>bar</rasd:ElementName>
<rasd:InstanceID>12</rasd:InstanceID>
...
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet3</rasd:ElementName>
+ <rasd:ElementName>bar</rasd:ElementName>
<rasd:InstanceID>13</rasd:InstanceID>
""")
def test_set_nic_name_pattern(self):
"""Set NIC names based on a pattern."""
self.command.package = self.input_ovf
self.command.nic_names = ['eth{0}']
self.command.run()
self.command.finished()
self.check_diff("""
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet1</rasd:ElementName>
+ <rasd:ElementName>eth0</rasd:ElementName>
<rasd:InstanceID>11</rasd:InstanceID>
...
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet2</rasd:ElementName>
+ <rasd:ElementName>eth1</rasd:ElementName>
<rasd:InstanceID>12</rasd:InstanceID>
...
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet3</rasd:ElementName>
+ <rasd:ElementName>eth2</rasd:ElementName>
<rasd:InstanceID>13</rasd:InstanceID>
""")
def test_set_nic_name_list_pattern(self):
"""Set NIC names based on a constant plus a pattern."""
self.command.package = self.input_ovf
self.command.nic_names = ['foo', 'eth{10}']
self.command.run()
self.command.finished()
self.check_diff("""
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet1</rasd:ElementName>
+ <rasd:ElementName>foo</rasd:ElementName>
<rasd:InstanceID>11</rasd:InstanceID>
...
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet2</rasd:ElementName>
+ <rasd:ElementName>eth10</rasd:ElementName>
<rasd:InstanceID>12</rasd:InstanceID>
...
<rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet3</rasd:ElementName>
+ <rasd:ElementName>eth11</rasd:ElementName>
<rasd:InstanceID>13</rasd:InstanceID>
""")
def test_deprecated_nic_type(self):
"""The nic_type method is deprecated by nic_types."""
self.command.package = self.input_ovf
self.assertEqual(self.command.nic_type, None)
self.command.nic_type = 'e1000'
self.assertEqual(self.command.nic_type, 'E1000')
self.assertEqual(self.command.nic_types, ['E1000'])
self.command.nic_types = ['e1000', 'virtio']
self.assertEqual(self.command.nic_types, ['E1000', 'virtio'])
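        # Reading the singular property while multiple types are set is
        # ambiguous, hence TypeError.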
with self.assertRaises(TypeError):
assert self.command.nic_type
def test_set_nic_kitchen_sink_all_profiles(self):
"""Test changing many NIC properties at once under all profiles."""
self.command.package = self.input_ovf
self.command.nic_types = ['e1000', 'virtio']
self.command.nic_networks = ['UT1', 'UT2', 'UT3']
self.command.mac_addresses_list = \
['00:00:00:00:00:01', '11:22:33:44:55:66', 'fe:fd:fc:fb:fa:f9']
self.command.run()
self.command.finished()
self.assertLogged(**self.removing_network_message())
self.check_diff("""
<ovf:Info>The list of logical networks</ovf:Info>
- <ovf:Network ovf:name="VM Network">
- <ovf:Description>VM Network</ovf:Description>
+ <ovf:Network ovf:name="UT1">
+ <ovf:Description>UT1</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="UT2">
+ <ovf:Description>UT2</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="UT3">
+ <ovf:Description>UT3</ovf:Description>
</ovf:Network>
...
<ovf:Item>
+ <rasd:Address>00:00:00:00:00:01</rasd:Address>
<rasd:AddressOnParent>11</rasd:AddressOnParent>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT1</rasd:Connection>
+ <rasd:Description>E1000/virtio ethernet adapter on "UT1"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet1</rasd:ElementName>
<rasd:InstanceID>11</rasd:InstanceID>
- <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceSubType>E1000 virtio</rasd:ResourceSubType>
<rasd:ResourceType>10</rasd:ResourceType>
...
<ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:Address>11:22:33:44:55:66</rasd:Address>
<rasd:AddressOnParent>12</rasd:AddressOnParent>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT2</rasd:Connection>
+ <rasd:Description>E1000/virtio ethernet adapter on "UT2"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet2</rasd:ElementName>
<rasd:InstanceID>12</rasd:InstanceID>
- <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceSubType>E1000 virtio</rasd:ResourceSubType>
<rasd:ResourceType>10</rasd:ResourceType>
...
<ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:Address>fe:fd:fc:fb:fa:f9</rasd:Address>
<rasd:AddressOnParent>13</rasd:AddressOnParent>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT3</rasd:Connection>
+ <rasd:Description>E1000/virtio ethernet adapter on "UT3"\
</rasd:Description>
<rasd:ElementName>GigabitEthernet3</rasd:ElementName>
<rasd:InstanceID>13</rasd:InstanceID>
- <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceSubType>E1000 virtio</rasd:ResourceSubType>
<rasd:ResourceType>10</rasd:ResourceType>
""")
def test_set_nic_kitchen_sink_one_profile(self):
"""Test changing many NIC properties at once under one profile."""
self.command.package = self.input_ovf
self.command.profiles = ['4CPU-4GB-3NIC']
self.command.nics = 4
self.command.nic_networks = ['UT']
self.command.run()
self.command.finished()
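        # The shared NIC 1 is overridden for this profile by a cloned,
        # profile-specific Item; other profiles keep the original.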
self.check_diff("""
<ovf:Description>VM Network</ovf:Description>
+ </ovf:Network>
+ <ovf:Network ovf:name="UT">
+ <ovf:Description>UT</ovf:Description>
</ovf:Network>
...
<ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:AddressOnParent>11</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>UT</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT"\
</rasd:Description>
+ <rasd:ElementName>GigabitEthernet1</rasd:ElementName>
+ <rasd:InstanceID>11</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
<rasd:AddressOnParent>12</rasd:AddressOnParent>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT"</rasd:Description>
<rasd:ElementName>GigabitEthernet2</rasd:ElementName>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
+ <rasd:Connection>UT</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT"</rasd:Description>
<rasd:ElementName>GigabitEthernet3</rasd:ElementName>
<rasd:InstanceID>13</rasd:InstanceID>
+ <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:AddressOnParent>14</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>UT</rasd:Connection>
+ <rasd:Description>VMXNET3 ethernet adapter on "UT"</rasd:Description>
+ <rasd:ElementName>Ethernet4</rasd:ElementName>
+ <rasd:InstanceID>14</rasd:InstanceID>
<rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
""")
def test_set_nic_kitchen_sink_no_existing(self):
"""Define NIC in an OVF that previously had none."""
self.command.package = self.minimal_ovf
self.command.nics = 1
self.command.nic_networks = ['testme']
self.command.nic_types = ['virtio-net-pci', 'e1000']
self.command.mac_addresses_list = ['12:34:56:78:9a:bc']
self.command.run()
self.assertLogged(**self.NEW_HW_FROM_SCRATCH)
self.command.finished()
self.check_diff(file1=self.minimal_ovf,
expected="""
<?xml version='1.0' encoding='utf-8'?>
-<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_ResourceAllocationSettingData">
<ovf:References />
+ <ovf:NetworkSection>
+ <ovf:Info>Logical networks</ovf:Info>
+ <ovf:Network ovf:name="testme">
+ <ovf:Description>testme</ovf:Description>
+ </ovf:Network>
+ </ovf:NetworkSection>
<ovf:VirtualSystem ovf:id="x">
...
<ovf:Info />
+ <ovf:Item>
+ <rasd:Address>12:34:56:78:9a:bc</rasd:Address>
+ <rasd:Connection>testme</rasd:Connection>
+ <rasd:ElementName>Ethernet1</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceSubType>virtio E1000</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""")
def test_set_serial_count_delete_one_profile(self):
"""Remove a shared serial port from one profile only."""
self.command.package = self.input_ovf
self.command.profiles = ['2CPU-2GB-1NIC']
self.command.serial_ports = 1
self.command.run()
self.command.finished()
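        # The port survives, restricted to the two profiles that keep it.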
self.check_diff("""
</ovf:Item>
- <ovf:Item ovf:required="false">
+ <ovf:Item ovf:configuration="1CPU-1GB-1NIC 4CPU-4GB-3NIC" \
ovf:required="false">
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
""")
def test_set_serial_count_delete_all_profiles(self):
"""Remove a serial port across all profiles."""
self.command.package = self.input_ovf
self.command.serial_ports = 1
self.command.run()
self.command.finished()
self.check_diff("""
</ovf:Item>
- <ovf:Item ovf:required="false">
- <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Description>Serial Port acting as IOSd Aux Port\
</rasd:Description>
- <rasd:ElementName>Serial 2</rasd:ElementName>
- <rasd:InstanceID>10</rasd:InstanceID>
- <rasd:ResourceType>21</rasd:ResourceType>
- </ovf:Item>
<ovf:Item>
""")
def test_set_serial_count_create_all_profiles(self):
"""Create a serial port under all profiles."""
self.command.package = self.input_ovf
self.command.serial_ports = 3
self.command.run()
self.command.finished()
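        # The third port is cloned from the last existing serial port,
        # hence the repeated "Serial 2" name with a fresh InstanceID.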
self.check_diff("""
</ovf:Item>
+ <ovf:Item ovf:required="false">
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Description>Serial Port acting as IOSd Aux Port\
</rasd:Description>
+ <rasd:ElementName>Serial 2</rasd:ElementName>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:ResourceType>21</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""")
def test_set_serial_count_no_existing(self):
"""Create a serial port in an OVF that had none."""
self.command.package = self.minimal_ovf
self.command.serial_ports = 1
self.command.run()
self.assertLogged(**self.NEW_HW_FROM_SCRATCH)
self.command.finished()
self.check_diff(file1=self.minimal_ovf,
expected="""
<?xml version='1.0' encoding='utf-8'?>
-<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_ResourceAllocationSettingData">
<ovf:References />
...
<ovf:Info />
+ <ovf:Item>
+ <rasd:ElementName>serial</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>21</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""")
def test_set_serial_connectivity_one_port_all_profiles(self):
"""Set serial connectivity for one port under all profiles."""
self.command.package = self.input_ovf
self.command.serial_connectivity = ['telnet://localhost:22001']
self.command.run()
self.command.finished()
self.check_diff("""
<ovf:Item ovf:required="false">
+ <rasd:Address>telnet://localhost:22001</rasd:Address>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
""")
def test_set_serial_connectivity_two_ports_all_profiles(self):
"""Set serial connectivity for multiple ports across all profiles."""
self.command.package = self.input_ovf
self.command.serial_connectivity = \
['telnet://localhost:22001', 'telnet://localhost:22002']
self.command.run()
self.command.finished()
self.check_diff("""
<ovf:Item ovf:required="false">
+ <rasd:Address>telnet://localhost:22001</rasd:Address>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
...
<ovf:Item ovf:required="false">
+ <rasd:Address>telnet://localhost:22002</rasd:Address>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
""")
def test_serial_create_kitchen_sink(self):
"""Create a serial port and set connectivity in one pass."""
self.command.package = self.input_ovf
self.command.serial_ports = '3'
self.command.serial_connectivity = \
['telnet://foo:1', 'telnet://foo:2', 'telnet://foo:3']
self.command.run()
self.command.finished()
self.check_diff("""
<ovf:Item ovf:required="false">
+ <rasd:Address>telnet://foo:1</rasd:Address>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
...
<ovf:Item ovf:required="false">
+ <rasd:Address>telnet://foo:2</rasd:Address>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
...
<rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item ovf:required="false">
+ <rasd:Address>telnet://foo:3</rasd:Address>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Description>Serial Port acting as IOSd Aux Port\
</rasd:Description>
+ <rasd:ElementName>Serial 2</rasd:ElementName>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:ResourceType>21</rasd:ResourceType>
</ovf:Item>
""")
def test_serial_delete_kitchen_sink(self):
"""Delete a serial port and set connectivity in one pass."""
self.command.package = self.input_ovf
self.command.serial_ports = 1
self.command.serial_connectivity = ['telnet://bar:22']
self.command.run()
self.command.finished()
self.check_diff("""
<ovf:Item ovf:required="false">
+ <rasd:Address>telnet://bar:22</rasd:Address>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
...
<rasd:InstanceID>9</rasd:InstanceID>
- <rasd:ResourceType>21</rasd:ResourceType>
- </ovf:Item>
- <ovf:Item ovf:required="false">
- <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Description>Serial Port acting as IOSd Aux Port\
</rasd:Description>
- <rasd:ElementName>Serial 2</rasd:ElementName>
- <rasd:InstanceID>10</rasd:InstanceID>
<rasd:ResourceType>21</rasd:ResourceType>
""")
def test_set_scsi_subtype_all_profiles(self):
"""Set SCSI controller subtype under all profiles."""
self.command.package = self.input_ovf
self.command.scsi_subtype = "virtio"
self.assertEqual(self.command.scsi_subtype, "virtio")
self.command.run()
self.command.finished()
self.check_diff("""
<rasd:InstanceID>3</rasd:InstanceID>
- <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>
+ <rasd:ResourceSubType>virtio</rasd:ResourceSubType>
<rasd:ResourceType>6</rasd:ResourceType>
""")
def test_clear_scsi_subtype_all_profiles(self):
"""Clear SCSI controller subtype under all profiles."""
self.command.package = self.input_ovf
# TODO: this should really be an empty list or None
self.command.scsi_subtype = ""
self.assertEqual(self.command.scsi_subtype, None)
self.assertEqual(self.command.scsi_subtypes, [])
self.command.run()
self.command.finished()
self.check_diff("""
<rasd:InstanceID>3</rasd:InstanceID>
- <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>
<rasd:ResourceType>6</rasd:ResourceType>
""")
def test_set_scsi_subtype_one_profile(self):
"""Set SCSI controller subtype under a single profile."""
self.command.package = self.input_ovf
self.command.scsi_subtypes = ['buslogic', 'lsilogic']
self.assertEqual(self.command.scsi_subtypes, ['buslogic', 'lsilogic'])
with self.assertRaises(TypeError):
assert self.command.scsi_subtype
self.command.profiles = ['4CPU-4GB-3NIC']
self.command.run()
self.command.finished()
# This requires creating a new variant of the SCSI controller
# specific to this profile
self.check_diff("""
</ovf:Item>
+ <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>SCSI Controller</rasd:Description>
+ <rasd:ElementName>SCSI Controller 0</rasd:ElementName>
+ <rasd:InstanceID>3</rasd:InstanceID>
+ <rasd:ResourceSubType>buslogic lsilogic</rasd:ResourceSubType>
+ <rasd:ResourceType>6</rasd:ResourceType>
+ </ovf:Item>
<ovf:Item>
""")
def test_set_scsi_subtype_no_existing(self):
"""Set SCSI controller subtype for an OVF with none (no-op)."""
self.command.package = self.minimal_ovf
self.assertEqual(self.command.scsi_subtype, None)
self.assertEqual(self.command.scsi_subtypes, None)
self.command.scsi_subtype = "virtualscsi"
self.assertEqual(self.command.scsi_subtype, "VirtualSCSI")
self.assertEqual(self.command.scsi_subtypes, ["VirtualSCSI"])
self.command.run()
self.assertLogged(**self.NO_ITEMS_NO_WORK)
self.command.finished()
self.check_diff("", file1=self.minimal_ovf)
def test_set_ide_subtype_all_profiles(self):
"""Set IDE controller subtype across all profiles."""
self.command.package = self.input_ovf
self.command.ide_subtypes = ["virtio", "piix4"]
self.assertEqual(self.command.ide_subtypes, ["virtio", "PIIX4"])
with self.assertRaises(TypeError):
assert self.command.ide_subtype
self.command.run()
self.command.finished()
# Since there is no pre-existing subtype, we just create it
# under each controller:
self.check_diff("""
<rasd:InstanceID>4</rasd:InstanceID>
+ <rasd:ResourceSubType>virtio PIIX4</rasd:ResourceSubType>
<rasd:ResourceType>5</rasd:ResourceType>
...
<rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceSubType>virtio PIIX4</rasd:ResourceSubType>
<rasd:ResourceType>5</rasd:ResourceType>
""")
def test_set_ide_subtype_one_profile(self):
"""Set IDE controller subtype under a single profile."""
self.command.package = self.input_ovf
self.command.ide_subtype = "virtio"
self.assertEqual(self.command.ide_subtype, "virtio")
self.assertEqual(self.command.ide_subtypes, ["virtio"])
self.command.profiles = ['4CPU-4GB-3NIC']
self.command.run()
self.command.finished()
# Here we have to create new controllers under this profile
# while leaving the default alone
self.check_diff("""
</ovf:Item>
+ <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:Address>1</rasd:Address>
+ <rasd:Description>IDE Controller</rasd:Description>
+ <rasd:ElementName>VirtualIDEController 1</rasd:ElementName>
+ <rasd:InstanceID>4</rasd:InstanceID>
+ <rasd:ResourceSubType>virtio</rasd:ResourceSubType>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ </ovf:Item>
<ovf:Item>
...
<rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>IDE Controller</rasd:Description>
+ <rasd:ElementName>VirtualIDEController 0</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceSubType>virtio</rasd:ResourceSubType>
<rasd:ResourceType>5</rasd:ResourceType>
""")
def test_set_ide_subtype_no_existing(self):
"""Set IDE controller subtype for an OVF with none (no-op)."""
self.command.package = self.minimal_ovf
self.assertEqual(self.command.ide_subtype, None)
self.assertEqual(self.command.ide_subtypes, None)
self.command.ide_subtype = "virtio"
self.command.run()
self.assertLogged(**self.NO_ITEMS_NO_WORK)
self.command.finished()
self.check_diff("", file1=self.minimal_ovf)
def test_create_profile_inherit_default(self):
"""Create a new profile that's identical to the default one."""
self.command.package = self.input_ovf
self.command.profiles = ['UT']
self.command.cpus = 1
self.command.run()
self.command.finished()
self.check_diff("""
</ovf:Configuration>
+ <ovf:Configuration ovf:id="UT">
+ <ovf:Label>UT</ovf:Label>
+ <ovf:Description>UT</ovf:Description>
+ </ovf:Configuration>
</ovf:DeploymentOptionSection>
""")
def test_create_new_profile(self):
"""Create a new profile with new values."""
self.command.package = self.input_ovf
self.command.profiles = ['UT']
self.command.cpus = 8
self.command.run()
self.command.finished()
self.check_diff("""
</ovf:Configuration>
+ <ovf:Configuration ovf:id="UT">
+ <ovf:Label>UT</ovf:Label>
+ <ovf:Description>UT</ovf:Description>
+ </ovf:Configuration>
</ovf:DeploymentOptionSection>
...
</ovf:Item>
+ <ovf:Item ovf:configuration="UT">
+ <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+ <rasd:Description>Number of Virtual CPUs</rasd:Description>
+ <rasd:ElementName>8 virtual CPU(s)</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>8</rasd:VirtualQuantity>
+ <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>
+ </ovf:Item>
<ovf:Item>
""")
def test_create_two_profiles(self):
"""Create two profiles at once."""
self.command.package = self.input_ovf
self.command.profiles = ['UT', 'UT2']
self.command.memory = 8192
self.assertLogged(**self.MEMORY_UNIT_GUESS)
self.command.run()
self.command.finished()
self.check_diff("""
</ovf:Configuration>
+ <ovf:Configuration ovf:id="UT">
+ <ovf:Label>UT</ovf:Label>
+ <ovf:Description>UT</ovf:Description>
+ </ovf:Configuration>
+ <ovf:Configuration ovf:id="UT2">
+ <ovf:Label>UT2</ovf:Label>
+ <ovf:Description>UT2</ovf:Description>
+ </ovf:Configuration>
</ovf:DeploymentOptionSection>
...
</ovf:Item>
+ <ovf:Item ovf:configuration="UT UT2">
+ <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:ElementName>8192MB of memory</rasd:ElementName>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:VirtualQuantity>8192</rasd:VirtualQuantity>
+ </ovf:Item>
<ovf:Item>
""")
def test_create_profile_no_existing(self):
"""Add a profile to an OVF that doesn't have any."""
self.command.package = self.minimal_ovf
self.command.profiles = ['UT']
self.command.run()
self.command.finished()
self.check_diff(file1=self.minimal_ovf,
expected="""
<ovf:References />
+ <ovf:DeploymentOptionSection>
+ <ovf:Info>Configuration Profiles</ovf:Info>
+ <ovf:Configuration ovf:id="UT">
+ <ovf:Label>UT</ovf:Label>
+ <ovf:Description>UT</ovf:Description>
+ </ovf:Configuration>
+ </ovf:DeploymentOptionSection>
<ovf:VirtualSystem ovf:id="x">
""")
def test_delete_one_profile(self):
"""Delete one configuration profile."""
self.command.package = self.input_ovf
self.command.profiles = ['1CPU-1GB-1NIC', '4CPU-4GB-3NIC']
self.command.delete_all_other_profiles = True
self.command.run()
self.command.finished()
self.check_diff("""
</ovf:Configuration>
- <ovf:Configuration ovf:id="2CPU-2GB-1NIC">
- <ovf:Label>2 vCPUs, 2 GB RAM, 1 NIC</ovf:Label>
- <ovf:Description>Minimal hardware profile - 2 vCPUs, 2 GB RAM, \
1 NIC</ovf:Description>
- </ovf:Configuration>
<ovf:Configuration ovf:default="true" ovf:id="4CPU-4GB-3NIC">
...
</ovf:Item>
- <ovf:Item ovf:configuration="2CPU-2GB-1NIC">
- <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
- <rasd:Description>Number of Virtual CPUs</rasd:Description>
- <rasd:ElementName>2 virtual CPU(s)</rasd:ElementName>
- <rasd:InstanceID>1</rasd:InstanceID>
- <rasd:ResourceType>3</rasd:ResourceType>
- <rasd:VirtualQuantity>2</rasd:VirtualQuantity>
- <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>
- </ovf:Item>
<ovf:Item ovf:configuration="4CPU-4GB-3NIC">
...
<rasd:VirtualQuantity>1024</rasd:VirtualQuantity>
- </ovf:Item>
- <ovf:Item ovf:configuration="2CPU-2GB-1NIC">
- <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
- <rasd:Description>Memory Size</rasd:Description>
- <rasd:ElementName>2048MB of memory</rasd:ElementName>
- <rasd:InstanceID>2</rasd:InstanceID>
- <rasd:ResourceType>4</rasd:ResourceType>
- <rasd:VirtualQuantity>2048</rasd:VirtualQuantity>
</ovf:Item>
""")
def test_delete_all_profiles(self):
"""Delete all configuration profiles, leaving only the default hw."""
self.command.package = self.input_ovf
self.command.delete_all_other_profiles = True
self.command.run()
self.command.finished()
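        # With no profiles left, the DeploymentOptionSection itself is
        # removed along with every profile-specific Item; only the
        # baseline hardware remains.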
self.check_diff("""
</ovf:NetworkSection>
- <ovf:DeploymentOptionSection>
- <ovf:Info>Configuration Profiles</ovf:Info>
- <ovf:Configuration ovf:id="1CPU-1GB-1NIC">
- <ovf:Label>1 vCPU, 1 GB RAM, 1 NIC</ovf:Label>
- <ovf:Description>Minimal hardware profile - 1 vCPU, 1 GB RAM, 1 NIC\
</ovf:Description>
- </ovf:Configuration>
- <ovf:Configuration ovf:id="2CPU-2GB-1NIC">
- <ovf:Label>2 vCPUs, 2 GB RAM, 1 NIC</ovf:Label>
- <ovf:Description>Minimal hardware profile - 2 vCPUs, 2 GB RAM, 1 NIC\
</ovf:Description>
- </ovf:Configuration>
- <ovf:Configuration ovf:default="true" ovf:id="4CPU-4GB-3NIC">
- <ovf:Label>4 vCPUs, 4 GB RAM, 3 NICs</ovf:Label>
- <ovf:Description>Default hardware profile - 4 vCPUs, 4 GB RAM, 3 NICs\
</ovf:Description>
- </ovf:Configuration>
- </ovf:DeploymentOptionSection>
<ovf:VirtualSystem ovf:id="test">
...
</ovf:Item>
- <ovf:Item ovf:configuration="2CPU-2GB-1NIC">
- <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
- <rasd:Description>Number of Virtual CPUs</rasd:Description>
- <rasd:ElementName>2 virtual CPU(s)</rasd:ElementName>
- <rasd:InstanceID>1</rasd:InstanceID>
- <rasd:ResourceType>3</rasd:ResourceType>
- <rasd:VirtualQuantity>2</rasd:VirtualQuantity>
- <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>
- </ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
- <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
- <rasd:Description>Number of Virtual CPUs</rasd:Description>
- <rasd:ElementName>4 virtual CPU(s)</rasd:ElementName>
- <rasd:InstanceID>1</rasd:InstanceID>
- <rasd:ResourceType>3</rasd:ResourceType>
- <rasd:VirtualQuantity>4</rasd:VirtualQuantity>
- <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>
- </ovf:Item>
<ovf:Item>
...
<rasd:VirtualQuantity>1024</rasd:VirtualQuantity>
- </ovf:Item>
- <ovf:Item ovf:configuration="2CPU-2GB-1NIC">
- <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
- <rasd:Description>Memory Size</rasd:Description>
- <rasd:ElementName>2048MB of memory</rasd:ElementName>
- <rasd:InstanceID>2</rasd:InstanceID>
- <rasd:ResourceType>4</rasd:ResourceType>
- <rasd:VirtualQuantity>2048</rasd:VirtualQuantity>
- </ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
- <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
- <rasd:Description>Memory Size</rasd:Description>
- <rasd:ElementName>4096MB of memory</rasd:ElementName>
- <rasd:InstanceID>2</rasd:InstanceID>
- <rasd:ResourceType>4</rasd:ResourceType>
- <rasd:VirtualQuantity>4096</rasd:VirtualQuantity>
</ovf:Item>
...
</ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
- <rasd:AddressOnParent>12</rasd:AddressOnParent>
- <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet2</rasd:ElementName>
- <rasd:InstanceID>12</rasd:InstanceID>
- <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
- <rasd:ResourceType>10</rasd:ResourceType>
- </ovf:Item>
- <ovf:Item ovf:configuration="4CPU-4GB-3NIC">
- <rasd:AddressOnParent>13</rasd:AddressOnParent>
- <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
- <rasd:Connection>VM Network</rasd:Connection>
- <rasd:Description>VMXNET3 ethernet adapter on "VM Network"\
</rasd:Description>
- <rasd:ElementName>GigabitEthernet3</rasd:ElementName>
- <rasd:InstanceID>13</rasd:InstanceID>
- <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>
- <rasd:ResourceType>10</rasd:ResourceType>
- </ovf:Item>
</ovf:VirtualHardwareSection>
""")
def test_create_delete_network_no_existing(self):
"""Create then delete a network in an OVF with none previously."""
self.command.package = self.minimal_ovf
self.command.nic_networks = ["VM Network", "Foobar"]
self.command.nics = 1
self.command.run()
self.assertLogged(**self.NEW_HW_FROM_SCRATCH)
self.assertLogged(levelname="WARNING",
msg="not all %s values were used",
args=('ethernet', 'Connection', ['Foobar']))
self.command.finished()
# network 'Foobar' is not used, so it'll be deleted
self.assertLogged(**self.removing_network_message('Foobar'))
self.check_diff(file1=self.minimal_ovf,
expected="""
<?xml version='1.0' encoding='utf-8'?>
-<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_ResourceAllocationSettingData">
<ovf:References />
+ <ovf:NetworkSection>
+ <ovf:Info>Logical networks</ovf:Info>
+ <ovf:Network ovf:name="VM Network">
+ <ovf:Description>VM Network</ovf:Description>
+ </ovf:Network>
+ </ovf:NetworkSection>
<ovf:VirtualSystem ovf:id="x">
...
<ovf:Info />
+ <ovf:Item>
+ <rasd:Connection>VM Network</rasd:Connection>
+ <rasd:ElementName>Ethernet1</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""")
self.command.destroy()
self.command = None
self.validate_with_ovftool(self.temp_file)
# Now remove all NICs and make sure it's cleaned back up
self.command = COTEditHardware(UI())
self.command.output = self.temp_file
self.command.package = self.temp_file
self.command.nics = 0
self.command.run()
self.command.finished()
self.assertLogged(**self.removing_network_message())
self.assertLogged(**self.REMOVING_NETWORKSECTION)
self.check_diff(file1=self.temp_file, file2=self.minimal_ovf,
expected="")
def test_set_cpus_v09(self):
"""Test CPU count settings with a v0.9 OVF."""
self.command.package = self.v09_ovf
self.command.cpus = 2
self.command.run()
self.command.finished()
self.check_diff(file1=self.v09_ovf,
expected="""
<ovf:Item>
- <rasd:Caption>1 virtual CPU(s)</rasd:Caption>
+ <rasd:Caption>2 virtual CPU(s)</rasd:Caption>
<rasd:Description>Number of Virtual CPUs</rasd:Description>
...
<rasd:AllocationUnits>MegaHertz</rasd:AllocationUnits>
- <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ <rasd:VirtualQuantity>2</rasd:VirtualQuantity>
</ovf:Item>
""")
def test_set_cpus_vmware(self):
"""Test CPU setting with a VMWare OVF."""
self.command.package = self.vmware_ovf
self.command.cpus = 4
self.command.run()
self.command.finished()
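        # Besides the CPU change, rewriting normalizes the XML declaration
        # and drops unused namespace declarations, hence the diff at the
        # Envelope level.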
self.check_diff(file1=self.vmware_ovf,
expected="""
-<?xml version="1.0" encoding="UTF-8"?>
-<ovf:Envelope vmw:buildId="build-880146" \
xmlns="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:cim="http://schemas.dmtf.org/wbem/wscim/1/common" \
xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_ResourceAllocationSettingData" \
xmlns:vmw="http://www.vmware.com/schema/ovf" \
xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_VirtualSystemSettingData" \
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+<?xml version='1.0' encoding='utf-8'?>
+<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_ResourceAllocationSettingData" \
xmlns:vmw="http://www.vmware.com/schema/ovf" \
xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_VirtualSystemSettingData" vmw:buildId="build-880146">
<ovf:References>
...
<rasd:Description>Number of Virtual CPUs</rasd:Description>
- <rasd:ElementName>2 virtual CPU(s)</rasd:ElementName>
+ <rasd:ElementName>4 virtual CPU(s)</rasd:ElementName>
<rasd:InstanceID>1</rasd:InstanceID>
...
<rasd:ResourceType>3</rasd:ResourceType>
- <rasd:VirtualQuantity>2</rasd:VirtualQuantity>
+ <rasd:VirtualQuantity>4</rasd:VirtualQuantity>
<vmw:CoresPerSocket ovf:required="false">2</vmw:CoresPerSocket>
...
</ovf:VirtualSystem>
-</ovf:Envelope>
+</ovf:Envelope>
""") # noqa - trailing whitespace above is in base file
| [
"[email protected]"
] | |
27f37a64fd56abf5c84a6de0d251780d79d6574c | f889bc01147869459c0a516382e7b95221295a7b | /swagger_client/models/body_19.py | c3a0a50364961c0b3731d25640fcee6a6b0617c9 | [] | no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,764 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Body19(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'product': 'CatalogDataProductInterface',
'save_options': 'bool'
}
attribute_map = {
'product': 'product',
'save_options': 'saveOptions'
}
def __init__(self, product=None, save_options=None):
"""
Body19 - a model defined in Swagger
"""
self._product = None
self._save_options = None
self.product = product
if save_options is not None:
self.save_options = save_options
@property
def product(self):
"""
Gets the product of this Body19.
:return: The product of this Body19.
:rtype: CatalogDataProductInterface
"""
return self._product
@product.setter
def product(self, product):
"""
Sets the product of this Body19.
:param product: The product of this Body19.
:type: CatalogDataProductInterface
"""
if product is None:
raise ValueError("Invalid value for `product`, must not be `None`")
self._product = product
@property
def save_options(self):
"""
Gets the save_options of this Body19.
:return: The save_options of this Body19.
:rtype: bool
"""
return self._save_options
@save_options.setter
def save_options(self, save_options):
"""
Sets the save_options of this Body19.
:param save_options: The save_options of this Body19.
:type: bool
"""
self._save_options = save_options
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Body19):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
f4bac242f4ebcee19ff5f95406e835f40256a054 | 6118f2fa2be32a1b1d50a0965f7fa3e137b408bc | /examples/increment.py | 63228df71029b9ba4cb0701f57bd88b8a8616fee | [
"MIT"
] | permissive | eerimoq/mqttools | 66f296c3c76b4909c86d5d287e4a96b6b755bd44 | a28c86e89af0852249a5d6f33f9e67036c3eb8fe | refs/heads/master | 2021-12-27T04:35:33.868653 | 2021-12-24T12:15:01 | 2021-12-24T12:15:01 | 184,444,451 | 58 | 13 | MIT | 2021-11-30T19:04:53 | 2019-05-01T16:15:41 | Python | UTF-8 | Python | false | false | 799 | py | import asyncio
import mqttools
HOST = 'localhost'
PORT = 1883
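# A minimal request/response service: each integer received on
# /mqttools/incrementer/value/request is incremented by one and the
# result is published on /mqttools/counter-client/value/response.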
async def main():
client = mqttools.Client(HOST, PORT)
await client.start()
print(f'Connected to {HOST}:{PORT}.')
await client.subscribe('/mqttools/incrementer/value/request')
print('Subscribed to topic /mqttools/incrementer/value/request.')
while True:
message = await client.messages.get()
if message is None:
print('Broker connection lost!')
break
count = int(message.message)
print(f'Request count: {count}')
count += 1
print(f'Response count: {count}')
client.publish(mqttools.Message('/mqttools/counter-client/value/response',
str(count).encode('ascii')))
asyncio.run(main())
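# Example interaction, assuming a broker on localhost and the Mosquitto
# command line clients are installed:
#   mosquitto_sub -t /mqttools/counter-client/value/response &
#   mosquitto_pub -t /mqttools/incrementer/value/request -m 5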
| [
"[email protected]"
] | |
5c41edc54e8e9283a6870f5b3623e2c2ac088296 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayOpenPublicTopicModifyResponse.py | 6966bb50727abcc86c30bc3aab8df6044201e7f2 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 446 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenPublicTopicModifyResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenPublicTopicModifyResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayOpenPublicTopicModifyResponse, self).parse_response_content(response_content)
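        # This response defines no fields of its own, so nothing further
        # needs to be copied out of the parsed content.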
| [
"[email protected]"
] | |
c0501cb9929bcf12f787d370a8a1d9c9a0509d34 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/4/usersdata/133/2972/submittedfiles/swamee.py | 8bc3612585f4c89c2b305dc9bb6a96027440b5e3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
#COMECE SEU CÓDIGO AQUI
f=input('Digite o valor de f:')
l=input('Digite o valor de l:')
q=input('Digite o valor de q:')
deltah=input('Digite o valor de deltah:')
v=input('Digite o valor de v:')
D = ((8*f*l*(q*q))/(((math.pi)**2)*9.81*deltah))**0.2
Rey = (4*q)/((math.pi)*D*v)
k = (0.25)/((math.log10((0.000002)/(3.7*D) + (5.74)/((Rey)**0.9)))**0.5)
print('%.4f, %.4f, %.4f' %D %Rey %k)
| [
"[email protected]"
] | |
9a6213180a3b7206990f12a3342fc9c1ae19a54d | 01733042e84a768b77f64ec24118d0242b2f13b8 | /uhd_restpy/testplatform/sessions/ixnetwork/quicktest/traffictest_9709f3566877e5d5fb6ae115268058c6.py | 4aff724fb98589bc548a121034ccba351b540266 | [
"MIT"
] | permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 13,531 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class TrafficTest(Base):
"""
The TrafficTest class encapsulates a list of trafficTest resources that are managed by the user.
A list of resources can be retrieved from the server using the TrafficTest.find() method.
The list can be managed by using the TrafficTest.add() and TrafficTest.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'trafficTest'
_SDM_ATT_MAP = {
'ForceApplyQTConfig': 'forceApplyQTConfig',
'InputParameters': 'inputParameters',
'Mode': 'mode',
'Name': 'name',
}
def __init__(self, parent):
super(TrafficTest, self).__init__(parent)
@property
def Results(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.quicktest.results_23583c0cce1dabf7b75fe7d2ae18cfc4.Results): An instance of the Results class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.quicktest.results_23583c0cce1dabf7b75fe7d2ae18cfc4 import Results
return Results(self)._select()
@property
def TestConfig(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_e903cacf836ab6df3c51be95da4a21df.TestConfig): An instance of the TestConfig class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_e903cacf836ab6df3c51be95da4a21df import TestConfig
return TestConfig(self)._select()
@property
def TrafficSelection(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_22c0efed3052ed5002942a33e331fb3b.TrafficSelection): An instance of the TrafficSelection class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_22c0efed3052ed5002942a33e331fb3b import TrafficSelection
return TrafficSelection(self)
@property
def ForceApplyQTConfig(self):
"""
Returns
-------
- bool: Apply QT config
"""
return self._get_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'])
@ForceApplyQTConfig.setter
def ForceApplyQTConfig(self, value):
self._set_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'], value)
@property
def InputParameters(self):
"""
Returns
-------
- str: Input Parameters
"""
return self._get_attribute(self._SDM_ATT_MAP['InputParameters'])
@InputParameters.setter
def InputParameters(self, value):
self._set_attribute(self._SDM_ATT_MAP['InputParameters'], value)
@property
def Mode(self):
"""
Returns
-------
- str(existingMode | newMode): Test mode
"""
return self._get_attribute(self._SDM_ATT_MAP['Mode'])
@Mode.setter
def Mode(self, value):
self._set_attribute(self._SDM_ATT_MAP['Mode'], value)
@property
def Name(self):
"""
Returns
-------
- str: Test name
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
def update(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
"""Updates trafficTest resource on the server.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
"""Adds a new trafficTest resource on the server and adds it to the container.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with all currently retrieved trafficTest resources using find and the newly added trafficTest resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained trafficTest resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
"""Finds and retrieves trafficTest resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve trafficTest resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all trafficTest resources from the server.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with matching trafficTest resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of trafficTest data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the trafficTest resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Apply(self):
"""Executes the apply operation on the server.
Applies the specified Quick Test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self):
"""Executes the applyAsync operation on the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self):
"""Executes the applyAsyncResult operation on the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self):
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self):
"""Executes the generateReport operation on the server.
Generate a PDF report for the last succesfull test run.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
run(InputParameters=string)list
-------------------------------
- InputParameters (str): The input arguments of the test.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Starts the specified Quick Test.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(InputParameters=string)
-----------------------------
- InputParameters (str): The input arguments of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self):
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('waitForTest', payload=payload, response_object=None)
| [
"[email protected]"
] | |
9d5268559b9f20871c0835f6a0a9edd415c007c8 | 280019d1106e6dd887f1c0fe020bcd433790d8e1 | /capture_tag/templatetags/capture_tags.py | 0b5e7b45174cd019e0a95bd44634aada96d9f16d | [
"Apache-2.0"
] | permissive | edoburu/django-capture-tag | 41af5dea34ec791791e03a95e2e52b88dd8c3ea8 | f63533dd1a5ce3926c36e5795a3767ab4d7eb6fc | refs/heads/master | 2023-08-14T05:05:00.023501 | 2021-11-16T22:04:50 | 2021-11-16T22:04:50 | 56,684,352 | 19 | 4 | Apache-2.0 | 2023-07-11T08:20:29 | 2016-04-20T12:19:46 | Python | UTF-8 | Python | false | false | 2,445 | py | from django.template import Library, Node, TemplateSyntaxError
register = Library()
@register.tag(name="capture")
def do_capture(parser, token):
"""
Capture the contents of a tag output.
Usage:
.. code-block:: html+django
{% capture %}..{% endcapture %} # output in {{ capture }}
{% capture silent %}..{% endcapture %} # output in {{ capture }} only
{% capture as varname %}..{% endcapture %} # output in {{ varname }}
{% capture as varname silent %}..{% endcapture %} # output in {{ varname }} only
For example:
.. code-block:: html+django
{# Allow templates to override the page title/description #}
<meta name="description" content="{% capture as meta_description %}{% block meta-description %}{% endblock %}{% endcapture %}" />
<title>{% capture as meta_title %}{% block meta-title %}Untitled{% endblock %}{% endcapture %}</title>
{# copy the values to the Social Media meta tags #}
<meta property="og:description" content="{% block og-description %}{{ meta_description }}{% endblock %}" />
<meta name="twitter:title" content="{% block twitter-title %}{{ meta_title }}{% endblock %}" />
"""
bits = token.split_contents()
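    # Accepted forms: {% capture %}, {% capture silent %},
    # {% capture as var %} and {% capture as var silent %}.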
# tokens
t_as = "as"
t_silent = "silent"
var = "capture"
silent = False
num_bits = len(bits)
if len(bits) > 4:
raise TemplateSyntaxError("'capture' node supports '[as variable] [silent]' parameters.")
elif num_bits == 4:
t_name, t_as, var, t_silent = bits
silent = True
elif num_bits == 3:
t_name, t_as, var = bits
elif num_bits == 2:
t_name, t_silent = bits
silent = True
else:
var = "capture"
silent = False
if t_silent != "silent" or t_as != "as":
raise TemplateSyntaxError("'capture' node expects 'as variable' or 'silent' syntax.")
nodelist = parser.parse(("endcapture",))
parser.delete_first_token()
return CaptureNode(nodelist, var, silent)
class CaptureNode(Node):
def __init__(self, nodelist, varname, silent):
self.nodelist = nodelist
self.varname = varname
self.silent = silent
def render(self, context):
output = self.nodelist.render(context)
context[self.varname] = output
if self.silent:
return ""
else:
return output
| [
"[email protected]"
] | |
7f50743de0d5d46bbfdc8495e78fe260fec55f25 | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/communication/azure-communication-chat/tests/test_chat_thread_client_e2e_async.py | aea980942a7dfffee1131dd715df6326a604aeac | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 11,317 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import asyncio
import os
from datetime import datetime
from msrest.serialization import TZ_UTC
from azure.communication.administration import CommunicationIdentityClient
from azure.communication.chat.aio import (
ChatClient,
CommunicationUserCredential
)
from azure.communication.chat import (
ChatThreadMember,
ChatMessagePriority
)
from azure.communication.administration._shared.utils import parse_connection_str
from azure_devtools.scenario_tests import RecordingProcessor
from helper import URIIdentityReplacer
from chat_e2e_helper import ChatURIReplacer
from _shared.asynctestcase import AsyncCommunicationTestCase
from _shared.testcase import BodyReplacerProcessor, ResponseReplacerProcessor
class ChatThreadClientTestAsync(AsyncCommunicationTestCase):
def setUp(self):
super(ChatThreadClientTestAsync, self).setUp()
self.recording_processors.extend([
BodyReplacerProcessor(keys=["id", "token", "senderId", "chatMessageId", "nextLink", "members", "multipleStatus", "value"]),
URIIdentityReplacer(),
ResponseReplacerProcessor(keys=[self._resource_name]),
ChatURIReplacer()])
endpoint, _ = parse_connection_str(self.connection_str)
self.endpoint = endpoint
self.identity_client = CommunicationIdentityClient.from_connection_string(self.connection_str)
# create user
self.user = self.identity_client.create_user()
token_response = self.identity_client.issue_token(self.user, scopes=["chat"])
self.token = token_response.token
# create another user
self.new_user = self.identity_client.create_user()
# create ChatClient
self.chat_client = ChatClient(self.endpoint, CommunicationUserCredential(self.token))
def tearDown(self):
super(ChatThreadClientTestAsync, self).tearDown()
# delete created users
if not self.is_playback():
self.identity_client.delete_user(self.user)
self.identity_client.delete_user(self.new_user)
async def _create_thread(self):
# create chat thread
topic = "test topic"
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
members = [ChatThreadMember(
user=self.user,
display_name='name',
share_history_time=share_history_time
)]
self.chat_thread_client = await self.chat_client.create_chat_thread(topic, members)
self.thread_id = self.chat_thread_client.thread_id
async def _send_message(self):
# send a message
priority = ChatMessagePriority.NORMAL
content = 'hello world'
sender_display_name = 'sender name'
create_message_result = await self.chat_thread_client.send_message(
content,
priority=priority,
sender_display_name=sender_display_name)
self.message_id = create_message_result.id
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_update_thread(self):
async with self.chat_client:
await self._create_thread()
topic = "update topic"
async with self.chat_thread_client:
await self.chat_thread_client.update_thread(topic=topic)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_send_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
priority = ChatMessagePriority.NORMAL
content = 'hello world'
sender_display_name = 'sender name'
create_message_result = await self.chat_thread_client.send_message(
content,
priority=priority,
sender_display_name=sender_display_name)
self.assertTrue(create_message_result.id)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_get_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
message = await self.chat_thread_client.get_message(self.message_id)
assert message.id == self.message_id
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_list_messages(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
if self.is_live:
await asyncio.sleep(2)
chat_messages = self.chat_thread_client.list_messages(results_per_page=1)
items = []
async for item in chat_messages:
items.append(item)
assert len(items) > 0
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_update_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
content = "updated message content"
await self.chat_thread_client.update_message(self.message_id, content=content)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_delete_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
await self.chat_thread_client.delete_message(self.message_id)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_list_members(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
chat_thread_members = self.chat_thread_client.list_members()
items = []
async for item in chat_thread_members:
items.append(item)
assert len(items) == 1
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_add_members(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
new_member = ChatThreadMember(
user=self.new_user,
display_name='name',
share_history_time=share_history_time)
members = [new_member]
await self.chat_thread_client.add_members(members)
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_remove_member(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
# add member first
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
new_member = ChatThreadMember(
user=self.new_user,
display_name='name',
share_history_time=share_history_time)
members = [new_member]
await self.chat_thread_client.add_members(members)
# test remove member
await self.chat_thread_client.remove_member(self.new_user)
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_send_typing_notification(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self.chat_thread_client.send_typing_notification()
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_send_read_receipt(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
await self.chat_thread_client.send_read_receipt(self.message_id)
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_list_read_receipts(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
# send read receipts first
await self.chat_thread_client.send_read_receipt(self.message_id)
if self.is_live:
await asyncio.sleep(2)
# list read receipts
read_receipts = self.chat_thread_client.list_read_receipts()
items = []
async for item in read_receipts:
items.append(item)
assert len(items) > 0
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
| [
"[email protected]"
] | |
07c86867c6a6240b881b7799c91f53d202d3a79c | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/media/v20200201preview/_enums.py | 60f247261361ebfece18ad62df164c9d945509e3 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'MediaGraphRtspTransport',
]
class MediaGraphRtspTransport(str, Enum):
"""
Underlying RTSP transport. This can be used to enable or disable HTTP tunneling.
"""
HTTP = "Http"
TCP = "Tcp"
| [
"[email protected]"
] | |
872530536f3f6682b50288fc140a34b61bc5ebd4 | 566754f63c0d665af01bdad8814873468f8be888 | /python/learn/numpy/boolean.py | c40817a9576b6a15190233b89e8ef2a9284a34e1 | [
"MIT"
] | permissive | qrsforever/workspace | 7f7b0363649b73e96526745f85a22e70b1c749c9 | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | refs/heads/master | 2022-05-04T18:58:41.562544 | 2020-05-25T04:07:00 | 2020-05-25T04:07:00 | 82,469,335 | 2 | 0 | MIT | 2022-04-12T21:54:15 | 2017-02-19T15:36:43 | Jupyter Notebook | UTF-8 | Python | false | false | 1,015 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import numpy.random as rand
def test1():
"""
& | 布尔运算
"""
arr = np.zeros((16, 16)) + 3
# 从第4行(列)到倒数第4行(列)
arr[4:-4, 4:-4] = 6
arr[7:-7, 7:-7] = 9
# print(arr)
index1 = arr > 2
index2 = arr < 6
compound_index = index1 & index2
compound_index = (arr > 3) & (arr < 9)
arr2 = np.copy(arr, order='K')
arr2[compound_index] = 0
print(arr2)
compound_index = (arr == 9) | (index1 & index2)
arr3 = np.copy(arr)
arr3[compound_index] = 0
print(arr3)
def test2():
"""
随机处理数据
"""
# 返回高斯分布(0, 1)的一个样本
arr = rand.randn(100)
print(arr)
# 采集数值大于0.2的子集
index = arr > 0.2
res = arr[index]
# 子集中的数据平方减2
res = res ** 2 - 2
# 放回去
arr[index] = res
print(arr)
def main():
test1()
test2()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
340d4560fceeb7f8f5ce6df9db6f28fa1f292720 | 2d4240a03bfa47386677a78250df220c55a7bf6c | /PythonCookbookLearning/chapter8/8.7.3.py | 52c8d9a319d86872ad3ba20726dd89ad240dfb57 | [] | no_license | Falonie/Notes | c7976e9e7514e5d7cddf918c3c54442a89532aab | 38e980cb5170a696626085b72795a096679e972b | refs/heads/master | 2022-02-13T11:20:39.613115 | 2019-09-02T01:07:27 | 2019-09-02T01:07:27 | 99,218,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | class Base(object):
def __init__(self):
print('Base.__init__')
class A(Base):
def __init__(self):
super().__init__()
print('A.__init__')
class B(Base):
def __init__(self):
super().__init__()
print('B.__init__')
class C(A, B):
def __init__(self):
super().__init__()
print('C.__init__')
if __name__ == '__main__':
c = C()
print(C.mro())
print(C.__mro__) | [
"[email protected]"
] | |
98a4f5409336a01c4f7a38567ca2dfcdf5371cbc | c28783b279c89ea98967064304eb3d883940b193 | /src/di_replication/repl_read_top_row/repl_read_top_row.py | b024ae54582d3a03c023813d070c989e91a29ca5 | [
"MIT"
] | permissive | thhapke/di_replication | c7784f7c60dee527c5498e99f66d390e94db8645 | f23e48d60c0d76603eec5071ea57d0646a44389e | refs/heads/master | 2023-02-14T14:22:54.876677 | 2021-01-14T11:40:55 | 2021-01-14T11:40:55 | 277,468,734 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,994 | py | import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
import subprocess
import logging
import os
import random
from datetime import datetime, timezone
import pandas as pd
try:
api
except NameError:
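    # Minimal stand-in for the SAP Data Intelligence operator API so this
    # module can be executed and unit-tested outside a pipeline.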
class api:
queue = list()
class Message:
def __init__(self, body=None, attributes=""):
self.body = body
self.attributes = attributes
def send(port, msg):
if port == outports[1]['name']:
api.queue.append(msg)
class config:
## Meta data
config_params = dict()
version = '0.0.1'
tags = {'sdi_utils': ''}
operator_name = 'repl_read_top_row'
operator_description = "Read Top Row"
operator_description_long = "Read top row without constraint."
add_readme = dict()
add_readme["References"] = ""
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
def process(msg):
att = dict(msg.attributes)
att['operator'] = 'repl_read_top_row'
logger, log_stream = slog.set_logging(att['operator'], loglevel=api.config.debug_mode)
sql = 'SELECT TOP 1 * FROM {table}'.format(table=att['replication_table'])
logger.info('SQL statement: {}'.format(sql))
att['sql'] = sql
api.send(outports[1]['name'], api.Message(attributes=att,body=sql))
log = log_stream.getvalue()
if len(log) > 0 :
api.send(outports[0]['name'], log )
inports = [{'name': 'data', 'type': 'message', "description": "Input data"}]
outports = [{'name': 'log', 'type': 'string', "description": "Logging data"}, \
{'name': 'msg', 'type': 'message', "description": "msg with sql statement"}]
#api.set_port_callback(inports[0]['name'], process)
def test_operator():
msg = api.Message(attributes={'packageid':4711,'replication_table':'repl_table','base_table':'repl_table','latency':30,\
'append_mode' : 'I', 'data_outcome':True},body='')
process(msg)
for msg in api.queue :
print(msg.attributes)
print(msg.body)
if __name__ == '__main__':
test_operator()
if True:
subprocess.run(["rm", '-r','../../../solution/operators/sdi_replication_' + api.config.version])
gs.gensolution(os.path.realpath(__file__), api.config, inports, outports)
solution_name = api.config.operator_name + '_' + api.config.version
subprocess.run(["vctl", "solution", "bundle",'../../../solution/operators/sdi_replication_' + api.config.version, \
"-t", solution_name])
subprocess.run(["mv", solution_name + '.zip', '../../../solution/operators'])
| [
"[email protected]"
] | |
e7cb9d3d626b68c0e92c3fbeda2d26d8d2812576 | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/_protected_item_operation_results_operations.py | 686c9dc622990d21ec56f8dc1e20569d14958b9b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 6,058 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ProtectedItemOperationResultsOperations(object):
"""ProtectedItemOperationResultsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
vault_name, # type: str
resource_group_name, # type: str
fabric_name, # type: str
container_name, # type: str
protected_item_name, # type: str
operation_id, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ProtectedItemResource"]
"""Fetches the result of any operation on the backup item.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the backup item.
:type fabric_name: str
:param container_name: Container name associated with the backup item.
:type container_name: str
:param protected_item_name: Backup item name whose details are to be fetched.
:type protected_item_name: str
:param operation_id: OperationID which represents the operation whose result needs to be
fetched.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProtectedItemResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.models.ProtectedItemResource or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ProtectedItemResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str'),
'protectedItemName': self._serialize.url("protected_item_name", protected_item_name, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProtectedItemResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}/operationResults/{operationId}'} # type: ignore
| [
"[email protected]"
] | |
fccdf002d3624704682080cfcfad0a8da497660d | 2be8a9f06d4003d12c0a727fb83d284c31a53050 | /HoudiniHotBox17.0/lib/mantrasurface.py | 14fd36488357f5ba0316020a601a4c3c65b4e2c7 | [] | no_license | LiuLiangFx/SmileHotBOX | 7551d9578b2defe612950cb8e3bffdb85024cede | 8bd8eac69b3c2a9824b9aa4488ca77789bea8d85 | refs/heads/master | 2021-01-01T10:22:26.959731 | 2020-02-09T03:16:32 | 2020-02-09T03:16:32 | 239,236,801 | 0 | 0 | null | 2020-02-09T02:47:18 | 2020-02-09T02:47:18 | null | UTF-8 | Python | false | false | 138 | py | import hou
class mantrasurface:
def run(self):
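        # Create a Mantra Surface shader node inside the /shop network.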
node = hou.node("/shop")
node.createNode("mantrasurface")
| [
"[email protected]"
] | |
086a8df0c8339b236cf7ca37ad68644942a570e2 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_2203.py | d82b9273211c6488b6c4ed00ac417bea5f7bc2fb | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | # I want the actual file name that is returned by a PHP script
# Note: URL, directory, filename and extension are assumed to be defined
# earlier in the original snippet.
import urllib
urllib.urlretrieve(URL, directory + "\\" + filename + "." + extension)
| [
"[email protected]"
] | |
c208e65a004c73d0f48559a896158d1e95398217 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/EDataServer/SourceAutocompletePrivate.py | 1de46992f4fdcbcee66390bb6885068da4e6eb42 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,505 | py | # encoding: utf-8
# module gi.repository.EDataServer
# from /usr/lib64/girepository-1.0/EDataServer-1.2.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gio as __gi_repository_Gio
import gi.repository.GObject as __gi_repository_GObject
import gi.repository.Soup as __gi_repository_Soup
import gobject as __gobject
class SourceAutocompletePrivate(__gi.Struct):
# no doc
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(SourceAutocompletePrivate), '__module__': 'gi.repository.EDataServer', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'SourceAutocompletePrivate' objects>, '__weakref__': <attribute '__weakref__' of 'SourceAutocompletePrivate' objects>, '__doc__': None})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(SourceAutocompletePrivate)
| [
"[email protected]"
] | |
2a5b864a3ebcf588854fa0df2b2e9e32ddbee910 | 3fd6e85c36a7e9e4f9ddec163a55f3602ccfb98c | /hw/gimbal/firmware/site_scons/site_tools/arm_none_eabi.py | 1ed9fbfb9debd2019a680a3816a74982f6a83789 | [
"Apache-2.0"
] | permissive | SiChiTong/mjmech | acc5da4ac6edd9f1446cc13e471aedeea3e1c419 | a71f35e6ad6bc9c1530a0a33d68c45d073390b79 | refs/heads/master | 2020-03-20T03:44:13.276650 | 2018-05-06T02:59:55 | 2018-05-06T03:04:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | # Copyright 2015 Josh Pieper, [email protected]. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TOOLS = {
'CC' : 'gcc',
'CXX' : 'g++',
'LD' : 'gcc',
'AR' : 'ar',
'AS' : 'gcc',
'OBJCOPY' : 'objcopy',
'OBJDUMP' : 'objdump',
}
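# generate() loads the stock GNU tool modules and then rebinds each
# logical name above to its 'arm-none-eabi-' prefixed cross tool.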
def generate(env, **kwargs):
# Let's assume that the host version of the compiler is here and
# available.
gnu_tools = ['gcc', 'g++', 'gnulink', 'ar', 'gas']
for tool in gnu_tools:
env.Tool(tool)
for key, value in TOOLS.iteritems():
env[key] = 'arm-none-eabi-' + value
env.Append(ASFLAGS=['-c'])
env['PROGSUFFIX'] = '.elf'
def exists(env):
return 1
| [
"[email protected]"
] | |
e62bb40c823f97a4d88b9ee4884e3edb00c40a0d | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/dhcp/clientaddr.py | 134e0b93623bfb9ff57a9db85dbee07038399b49 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 3,567 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ClientAddr(Mo):
"""
The DHCP address received for the client.
"""
meta = ClassMeta("cobra.model.dhcp.ClientAddr")
meta.moClassName = "dhcpClientAddr"
meta.rnFormat = "addr-[%(address)s]"
meta.category = MoCategory.REGULAR
meta.label = "Client Address"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.dhcp.ClientIf")
meta.superClasses.add("cobra.model.dhcp.Addr")
meta.rnPrefixes = [
('addr-', True),
]
prop = PropMeta("str", "address", "address", 6133, PropCategory.REGULAR)
prop.label = "Address"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("address", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "address"))
getattr(meta.props, "address").needDelimiter = True
def __init__(self, parentMoOrDn, address, markDirty=True, **creationProps):
namingVals = [address]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
b8d9f5ee64570bdf0c15ab55e124cd7e677cb144 | be429a1e5e4903616a4532c1bf238df20fea75c0 | /6.14/127.单词接龙.py | 3f21e478f21ed4a7dfac01e5e07586b2c193a049 | [] | no_license | pythonnewbird/LeetCodeSolution | ccc8cc17df4cea3109d84b0c347ae91c1bc33a28 | 2447f760f08fb3879c5f03d8650e30ff74115d3d | refs/heads/master | 2020-03-19T05:06:06.681429 | 2018-07-01T12:39:09 | 2018-07-01T12:39:09 | 135,899,944 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | class Solution(object):
def ladderLength(self, beginWord, endWord, wordList):
"""
:type beginWord: str
:type endWord: str
:type wordList: List[str]
:rtype: int
"""
wdset=set(wordList)
if endWord not in wdset:
return 0
lts="abcdefghijklmnopqrstuvwxyz"
dist=float("inf")
q=[beginWord]
seen={beginWord:0}
graph={beginWord:set()}
while q:
cur=q.pop(0)
d=seen[cur]
if d>=dist:
break
for i in range(len(cur)):
for lt in lts:
if lt!=cur[i]:
new=cur[:i]+lt+cur[i+1:]
if new in wdset and (new not in seen or d+1==seen[new]):
if cur in graph:
graph[cur].add(new)
else:
graph[cur]=set([new])
if new==endWord:
dist=d+1
if new not in seen:
seen[new]=d+1
q.append(new)
if dist!=float('inf') :
return dist+1
else:
return 0 | [
"[email protected]"
] | |
512a43263d45f6d4fbf19a27ad961a1de09eba30 | fa5cb3cb27132a330673650afa1d68dd35f15251 | /newrelic/core/thread_utilization.py | fd57ba9f86fc98a03d51ad739747f385a68950b0 | [
"Apache-2.0"
] | permissive | jbeveland27/newrelic-python-agent | 95b4fdf253915100bc62bbd143066f589efc3ab9 | 86c78370ace1eba18e05de5e37aadb880f5f3ac4 | refs/heads/main | 2023-07-12T06:40:58.741312 | 2021-08-19T23:37:14 | 2021-08-19T23:37:14 | 398,122,410 | 1 | 0 | Apache-2.0 | 2021-08-20T01:38:35 | 2021-08-20T01:38:33 | null | UTF-8 | Python | false | false | 4,451 | py | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from newrelic.samplers.decorators import data_source_factory
try:
from newrelic.core._thread_utilization import ThreadUtilization
except ImportError:
ThreadUtilization = None
_utilization_trackers = {}
def utilization_tracker(application):
return _utilization_trackers.get(application)
class ThreadUtilizationDataSource(object):
def __init__(self, application):
self._consumer_name = application
self._utilization_tracker = None
self._last_timestamp = None
self._utilization = None
def start(self):
if ThreadUtilization:
utilization_tracker = ThreadUtilization()
_utilization_trackers[self._consumer_name] = utilization_tracker
self._utilization_tracker = utilization_tracker
self._last_timestamp = time.time()
self._utilization = self._utilization_tracker.utilization_count()
def stop(self):
try:
self._utilization_tracker = None
self._last_timestamp = None
self._utilization = None
            del _utilization_trackers[self._consumer_name]
except Exception:
pass
def __call__(self):
if self._utilization_tracker is None:
return
now = time.time()
# TODO This needs to be pushed down into _thread_utilization.c.
# In doing that, need to fix up UtilizationClass count so the
# reset is optional because in this case a read only variant is
# needed for getting a per request custom metric of the
# utilization during period of the request.
#
# TODO This currently doesn't take into consideration coroutines
# and instance bust percentage is percentage of a single thread
# and not of total available coroutines. Not sure whether can
# generate something meaningful for coroutines. Also doesn't
# work for asynchronous systems such as Twisted.
new_utilization = self._utilization_tracker.utilization_count()
elapsed_time = now - self._last_timestamp
utilization = new_utilization - self._utilization
utilization = utilization / elapsed_time
self._last_timestamp = now
self._utilization = new_utilization
total_threads = None
try:
# Recent mod_wsgi versions publish the number of actual
# threads so we can use this real value instead of the
# calculated value. This is important in order to get the
# correct utilization value for mod_wsgi daemon mode as the
# way it manages the thread pool it may not actually
# activate all available threads if the requirement isn't
# there for them. Thus the utilization figure will be too
# high as would only be calculated relative to the activated
# threads and not the total of what is actually available.
import mod_wsgi
total_threads = mod_wsgi.threads_per_process
except Exception:
pass
if total_threads is None:
total_threads = self._utilization_tracker.total_threads()
if total_threads:
# Don't report any metrics if don't detect any threads
# available and in use for handling web transactions,
# otherwise we end up report zero metrics for task systems
# such as Celery which skews the results wrongly.
yield ('Instance/Available', total_threads)
yield ('Instance/Used', utilization)
busy = total_threads and utilization/total_threads or 0.0
yield ('Instance/Busy', busy)
@data_source_factory(name='Thread Utilization')
def thread_utilization_data_source(settings, environ):
return ThreadUtilizationDataSource(environ['consumer.name'])
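
# Minimal usage sketch (assumes the C extension imported successfully so
# ThreadUtilization is available; 'MyApp' is an illustrative consumer name):
#
#   source = ThreadUtilizationDataSource('MyApp')
#   source.start()
#   ...  # handle some requests, then sample:
#   for name, value in source():
#       print(name, value)   # e.g. ('Instance/Busy', 0.25)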
| [
"[email protected]"
] | |
badc3f499d4497e8cb855a63837126b58ed27f20 | 758b475451f96bb63b0fd4922beb7b6e1ed40137 | /PyCalendar/branches/server-stable/src/pycalendar/vtodo.py | 9dbe55e934316b748a5ee5e034cfe4f1126faa63 | [
"Apache-2.0"
] | permissive | svn2github/calendarserver-raw | da9f0bfa94d8080a9803eab5e4630be21645b329 | 37edd10248e8c13dc13f65ff52c72df1783e3499 | refs/heads/master | 2023-01-11T00:51:40.137861 | 2015-01-07T00:07:46 | 2015-01-07T00:07:46 | 14,276,537 | 1 | 0 | null | 2022-12-21T13:08:16 | 2013-11-10T13:09:44 | Python | UTF-8 | Python | false | false | 10,786 | py | ##
# Copyright (c) 2007-2011 Cyrus Daboo. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycalendar import definitions
from pycalendar import itipdefinitions
from pycalendar.componentrecur import PyCalendarComponentRecur
from pycalendar.datetime import PyCalendarDateTime
from pycalendar.property import PyCalendarProperty
import cStringIO as StringIO
class PyCalendarVToDo(PyCalendarComponentRecur):
OVERDUE = 0
DUE_NOW= 1
DUE_LATER = 2
DONE = 3
CANCELLED= 4
@staticmethod
def sort_for_display(e1, e2):
s1 = e1.getMaster()
s2 = e2.getMaster()
# Check status first (convert None -> Needs action for tests)
        status1 = s1.mStatus
        status2 = s2.mStatus
if status1 == definitions.eStatus_VToDo_None:
status1 = definitions.eStatus_VToDo_NeedsAction
if status2 == definitions.eStatus_VToDo_None:
status2 = definitions.eStatus_VToDo_NeedsAction
if status1 != status2:
# More important ones at the top
return status1 < status2
# At this point the status of each is the same
# If status is cancelled sort by start time
        if s1.mStatus == definitions.eStatus_VToDo_Cancelled:
# Older ones at the bottom
return s1.mStart > s2.mStart
# If status is completed sort by completion time
        if s1.mStatus == definitions.eStatus_VToDo_Completed:
            # Older ones at the bottom
            return s1.mCompleted > s2.mCompleted
# Check due date exists
if s1.mHasEnd != s2.mHasEnd:
now = PyCalendarDateTime()
now.setToday()
# Ones with due dates after today below ones without due dates
if s1.hasEnd():
return s1.mEnd <= now
elif s2.hasEnd():
return now < s2.mEnd
# Check due dates if present
if s1.mHasEnd:
if s1.mEnd != s2.mEnd:
# Soonest dues dates above later ones
return s1.mEnd < s2.mEnd
# Check priority next
        if s1.mPriority != s2.mPriority:
            # Higher priority above lower ones
            return s1.mPriority < s2.mPriority
# Just use start time - older ones at the top
return s1.mStart < s2.mStart
def __init__(self, parent=None):
super(PyCalendarVToDo, self).__init__(parent=parent)
self.mPriority = 0
self.mStatus = definitions.eStatus_VToDo_None
self.mPercentComplete = 0
self.mCompleted = PyCalendarDateTime()
self.mHasCompleted = False
def duplicate(self, parent=None):
other = super(PyCalendarVToDo, self).duplicate(parent=parent)
other.mPriority = self.mPriority
other.mStatus = self.mStatus
other.mPercentComplete = self.mPercentComplete
other.mCompleted = self.mCompleted.duplicate()
other.mHasCompleted = self.mHasCompleted
return other
def getType(self):
return definitions.cICalComponent_VTODO
def getMimeComponentName(self):
return itipdefinitions.cICalMIMEComponent_VTODO
def addComponent(self, comp):
# We can embed the alarm components only
if comp.getType() == definitions.cICalComponent_VALARM:
super(PyCalendarVToDo, self).addComponent(comp)
else:
raise ValueError
def getStatus(self):
return self.mStatus
def setStatus(self, status):
self.mStatus = status
def getStatusText(self):
        sout = StringIO.StringIO()
if self.mStatus in (definitions.eStatus_VToDo_NeedsAction, definitions.eStatus_VToDo_InProcess):
if self.hasEnd():
# Check due date
today = PyCalendarDateTime()
today.setToday()
if self.getEnd() > today:
sout.append("Due: ")
whendue = self.getEnd() - today
if (whendue.getDays() > 0) and (whendue.getDays() <= 7):
sout.write(whendue.getDays())
sout.write(" days")
else:
sout.write(self.getEnd().getLocaleDate(PyCalendarDateTime.NUMERICDATE))
elif self.getEnd() == today:
sout.write("Due today")
else:
sout.write("Overdue: ")
overdue = today - self.getEnd()
if overdue.getWeeks() != 0:
                        sout.write(str(overdue.getWeeks()))
                        sout.write(" weeks")
                    else:
                        sout.write(str(overdue.getDays() + 1))
sout.write(" days")
else:
sout.write("Not Completed")
elif self.mStatus == definitions.eStatus_VToDo_Completed:
if self.hasCompleted():
sout.write("Completed: ")
sout.write(self.getCompleted().getLocaleDate(PyCalendarDateTime.NUMERICDATE))
else:
sout.write("Completed")
        elif self.mStatus == definitions.eStatus_VToDo_Cancelled:
sout.write("Cancelled")
        return sout.getvalue()
def getCompletionState(self):
if self.mStatus in (definitions.eStatus_VToDo_NeedsAction, definitions.eStatus_VToDo_InProcess):
if self.hasEnd():
# Check due date
today = PyCalendarDateTime()
today.setToday()
if self.getEnd() > today:
return PyCalendarVToDo.DUE_LATER
elif self.getEnd() == today:
return PyCalendarVToDo.DUE_NOW
else:
return PyCalendarVToDo.OVERDUE
else:
return PyCalendarVToDo.DUE_NOW
elif self.mStatus == definitions.eStatus_VToDo_Completed:
return PyCalendarVToDo.DONE
elif self.mStatus == definitions.eStatus_VToDo_Cancelled:
return PyCalendarVToDo.CANCELLED
def getPriority(self):
return self.mPriority
def setPriority(self, priority):
self.mPriority = priority
def getCompleted(self):
return self.mCompleted
def hasCompleted(self):
return self.mHasCompleted
def finalise(self):
# Do inherited
super(PyCalendarVToDo, self).finalise()
# Get DUE
temp = self.loadValueDateTime(definitions.cICalProperty_DUE)
if temp is None:
# Try DURATION instead
temp = self.loadValueDuration(definitions.cICalProperty_DURATION)
if temp is not None:
self.mEnd = self.mStart + temp
self.mHasEnd = True
else:
self.mHasEnd = False
else:
self.mHasEnd = True
self.mEnd = temp
# Get PRIORITY
self.mPriority = self.loadValueInteger(definitions.cICalProperty_PRIORITY)
# Get STATUS
temp = self.loadValueString(definitions.cICalProperty_STATUS)
if temp is not None:
if temp == definitions.cICalProperty_STATUS_NEEDS_ACTION:
self.mStatus = definitions.eStatus_VToDo_NeedsAction
elif temp == definitions.cICalProperty_STATUS_COMPLETED:
self.mStatus = definitions.eStatus_VToDo_Completed
elif temp == definitions.cICalProperty_STATUS_IN_PROCESS:
self.mStatus = definitions.eStatus_VToDo_InProcess
elif temp == definitions.cICalProperty_STATUS_CANCELLED:
self.mStatus = definitions.eStatus_VToDo_Cancelled
# Get PERCENT-COMPLETE
self.mPercentComplete = self.loadValueInteger(definitions.cICalProperty_PERCENT_COMPLETE)
# Get COMPLETED
temp = self.loadValueDateTime(definitions.cICalProperty_COMPLETED)
self.mHasCompleted = temp is not None
if self.mHasCompleted:
self.mCompleted = temp
# Editing
def editStatus(self, status):
# Only if it is different
if self.mStatus != status:
# Updated cached values
self.mStatus = status
# Remove existing STATUS & COMPLETED items
self.removeProperties(definitions.cICalProperty_STATUS)
self.removeProperties(definitions.cICalProperty_COMPLETED)
self.mHasCompleted = False
# Now create properties
value = None
if status == definitions.eStatus_VToDo_NeedsAction:
value = definitions.cICalProperty_STATUS_NEEDS_ACTION
if status == definitions.eStatus_VToDo_Completed:
value = definitions.cICalProperty_STATUS_COMPLETED
# Add the completed item
self.mCompleted.setNowUTC()
self.mHasCompleted = True
prop = PyCalendarProperty(definitions.cICalProperty_STATUS_COMPLETED, self.mCompleted)
self.addProperty(prop)
elif status == definitions.eStatus_VToDo_InProcess:
value = definitions.cICalProperty_STATUS_IN_PROCESS
elif status == definitions.eStatus_VToDo_Cancelled:
value = definitions.cICalProperty_STATUS_CANCELLED
prop = PyCalendarProperty(definitions.cICalProperty_STATUS, value)
self.addProperty(prop)
def editCompleted(self, completed):
# Remove existing COMPLETED item
self.removeProperties(definitions.cICalProperty_COMPLETED)
self.mHasCompleted = False
# Always UTC
self.mCompleted = completed.duplicate()
self.mCompleted.adjustToUTC()
self.mHasCompleted = True
prop = PyCalendarProperty(definitions.cICalProperty_STATUS_COMPLETED, self.mCompleted)
self.addProperty(prop)
def sortedPropertyKeyOrder(self):
return (
definitions.cICalProperty_UID,
definitions.cICalProperty_RECURRENCE_ID,
definitions.cICalProperty_DTSTART,
definitions.cICalProperty_DURATION,
definitions.cICalProperty_DUE,
definitions.cICalProperty_COMPLETED,
)
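
# Rough usage sketch (assumes a parent component object named 'calendar'
# already exists; untested):
#
#   todo = PyCalendarVToDo(parent=calendar)
#   todo.editStatus(definitions.eStatus_VToDo_Completed)
#   print todo.getStatusText()   # -> "Completed: <date>"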
| [
"cyrusdaboo@e27351fd-9f3e-4f54-a53b-843176b1656c"
] | cyrusdaboo@e27351fd-9f3e-4f54-a53b-843176b1656c |
bf84a23ac25841aaf18ddc5f2a8785a878f6e123 | 3313419e883041b04bd09f7e905dc9fb24cd8ec8 | /multi_kmeans_group_line_chart.py | 7559a28604b94a194b5308ec440890374719a7d0 | [] | no_license | xiaosean/preprocess_py | d6d46a91be0d31c3ac082c4dc21587b27b34bf11 | fa480a0f8401c4ccff61ea8215bcf40802b2ba36 | refs/heads/master | 2023-06-23T17:30:19.296637 | 2017-11-06T23:12:02 | 2017-11-06T23:12:04 | 90,627,403 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,789 | py | import numpy as np
import pandas as pd
from time import time
from sklearn.cluster import KMeans
from pandas.computation import expressions as expr
from bokeh.charts import Line, show, output_file, save
import pprint as pp
import sys
from bokeh.palettes import Spectral11, Category10
# set configure
# path = "./CDR_NORMALIZE_CONCAT/"
path = "./CDR_FINAL/"
filename = "0703normalize_65_cols.csv"
# path = "./CDR_ANALYZE/"
# path = "./CDR_CONCAT/"
# filename = sys.argv[1]
# filename = "CDR_CONCAT_TABLE_4_max_min.csv"
# filename = "CDR_CONCAT_TABLE_4.csv"
relative_filename = path + filename #+ ".csv"
# out_filename = "mds_mly_minus20160901"
# after the numeric_st_idx's number will be tranform to int64
# numeric_st_idx = 1
# K = 8
label_path = "./kmean_label/"
output_path = "./CDR_CONCAT_ANALYZE_GRAPH/"
# output_path = "./CDR_CONCAT_ANALYZE_GRAPH_MINI/"
# read revise csv file and print cost time
# just load 5 data
t0 = time()
df_src = pd.read_csv(relative_filename, error_bad_lines=False)
print("time for read csv: %.2f" % (time()-t0))
# -------------------------
for c in df_src.columns[1:]:
if not "CNT" in c:
df_src = df_src.drop(c, 1)
# ----------------------------
# df = df.drop('MINING_DW_SUBSCR_NO', 1)
df_group = pd.read_csv('DNA_KMEANS_RESULT_ID_NEW.csv', error_bad_lines = False)
groups_name = ['1', '2', '3', '4', '5', '6', '7', '8', 'seldom', 'None']
# groups_name = ['1', '2', '3', '4', '5', '6', '7', '8']
# Ks = [8, 7, 6, 7, 5, 6, 6, 8, 7, 7]
Ks = [6, 4, 6, 7, 7, 6, 8, 7, 7, 7]
evening = "Evening user"
moring = "Morning user"
midnight = "Midnight user"
lunch = "Lunch time user"
All = "All day user"
dinner = "Dinner time user"
afternoon = "Afternoon user"
cluster_name = [
{107141:moring, 121153:midnight, 17176:lunch, 59992:afternoon, 32089:evening, 70046:dinner},
{25449:evening, 30950:dinner, 5441:lunch, 59944:midnight, 62860:All},
{20553:afternoon, 20809:dinner, 26798:moring, 32848:midnight, 4801:lunch},
{17959:evening, 24518:dinner, 33790:moring, 35510:midnight, 5181:lunch},
{17238:evening, 25183:dinner, 32834:moring, 31327:midnight, 3892:lunch},
{14298:midnight, 21404:"Late midnight user", 35439:moring, 35802:dinner, 39104:"Office time user"},
{19744:evening, 24966:afternoon, 33129:"Night user", 41770:moring, 44540:midnight},
{106596:dinner, 124046:moring, 146613:midnight, 21343:lunch, 91568:afternoon}
]
norm = "0704"
df_src['Groups'] = df_group['Groups']
for j in range(8):
K = Ks[j]
group = groups_name[j]
    # copy the slice so the label assignment below does not trigger a
    # SettingWithCopyWarning
    df = df_src[df_src['Groups'] == group].copy()
label_path = "./kmean_label/"
# label_name = "label_K" + str(K) + "_de_with_kid_" + group + "_" + norm + ".npy"
label_name = "label_K" + str(K) + "__" + group + "_" + norm + ".npy"
labels_ = np.load(label_path + label_name)
    # df.loc['label',list(map(str, df.index))] = labels_
    df['label'] = labels_
    print(group)
    grouped = df.drop(['MINING_DW_SUBSCR_NO', 'Groups'], 1).groupby('label')
# grouped = df.groupby('label')
# get count
group_count = grouped[df.columns[1]].count().values
# df = df.drop('MINING_DW_SUBSCR_NO', 1)
# get mean
group_mean = grouped.mean()
# cluster_name = {1012:'每通通話量長', 1470990:'幾乎不用', 23626:'高度使用', 283083:'有在使用', 48456:'夜貓族', 3601:'超高度使用', 68665:'中度使用', 697364:'稍微使用'}
# aggregate display data
data = {}
for i in range(K):
# data[str(i)] = grouped.mean().values[i]
# if "HOUR" in filename:
# # data[cluster_name[cluster_result[i]] + "(" + str(cluster_result[i]) + ")"] = list(map(lambda x: x/30,grouped.mean().values[i]))
# # data["(" + str(group_count[i]) + ")"] = list(map(lambda x: x/30, group_mean.values[i][1:]))
# else:
# # data[cluster_name[cluster_result[i]] + "(" + str(cluster_result[i]) + ")"] = list(map(lambda x: x/4,grouped.mean().values[i]))
# data["(" + str(group_count[i]) + ")"] = list(map(lambda x: x/4, group_mean.values[i][1:]))
# data[cluster_name[j][group_count[i]] + "(" + str(group_count[i]) + ")"] = group_mean.values[i]
data["(" + str(group_count[i]) + ")"] = group_mean.values[i]
# data[str(cluster_name[i])] = grouped.mean().values[i]
pp.pprint(df.columns[1:-2])
# select label
# xl = str(df.columns)
# xl = "MO_0_24 MT_0_24 MO_SUN_SAT_w_h MT_SUN_SAT_w_h"
xl = "hour"
# if filename.find("WORK") != -1:
# xl = str(df.columns[1:])
# elif filename.find("hours") == -1:
# xl = "SUN ~ SAT"
# yl = "time"
# if filename.find("TIME") == -1:
# yl = "count"
yl = "percentage"
# draw
# # set line colors
# mycolors = []
# # if K > 5:
# # mycolors = Spectral11[0:5] + Spectral11[6:K + 1]
# # else:
# # mycolors = Spectral11[0:K]
# for i in range(K):
# mycolors.append(Spectral11[i * 2])
title = "Group " + group
line = Line(data, ylabel = yl, xlabel = xl, color = Category10[10], title = title, legend = "top_center")
# line = Line(data, ylabel = 'mean ' + sys.argv[2], xlabel = xl)
# line.legend.orientation = 'horizontal'
legend = line.legend
legend.plot = None
legend.location = (0 , 300)
line.add_layout(legend[0], "right")
line.xaxis.axis_label_text_font_size = '20px'
line.yaxis.axis_label_text_font_size = '20px'
line.title.text_font_size = '30px'
# save file
# output_file("test_K" + str(i + 1) + ".html")
output_filename = ("%s_K%d_G%s_%s_line.html" % (filename[:-4], K, group, norm))
output_file(output_path + output_filename)
# output_file(output_path + filename[:-4] + "_K" + str(K) + "_NAME_distribution.html")
save(line)
# show(line)
# # save file
# # output_file("test_K" + str(i + 1) + ".html")
# line.title.text = title + " DETAIL"
# output_file(output_path + filename[:-4] + "_K" + str(K) + "_NAME_LARGE_distribution.html")
# save(line)
# # show(line) | [
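
# The aggregation pattern used above, reduced to its core (illustrative
# feature/column names, not from this dataset):
#
#   labels = KMeans(n_clusters=K).fit_predict(df[feature_cols])
#   df['label'] = labels
#   cluster_means = df.groupby('label').mean()   # one mean row per cluster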
"[email protected]"
] | |
9ee8c45a0a1b6322c5d70c2a111aabf4840f4924 | 40a8d8c2c2bb98b6a27dd62825a601597e2970c5 | /home/management/commands/load_initial_data.py | 7fac54795d1eb102cf1db3da1f3040d1d30edc49 | [] | no_license | crowdbotics-users/elmedina3crowdboticsc-643 | 24dba124e00a7bef109cadebd9b5f07c4545c886 | 59274ebe30f52bab5455b4d853b4a75d6bd97c66 | refs/heads/master | 2020-04-16T04:51:54.804699 | 2019-01-11T17:36:08 | 2019-01-11T17:36:08 | 165,283,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py |
from django.core.management import BaseCommand
from home.models import CustomText, HomePage
def load_initial_data():
homepage_body = """
<h1 class="display-4 text-center">dj-app-160</h1>
<p class="lead">
This is the sample application created and deployed from the crowdbotics slack app. You can
view list of packages selected for this application below
</p>"""
customtext_title = 'dj-app-160'
CustomText.objects.create(title=customtext_title)
HomePage.objects.create(body=homepage_body)
class Command(BaseCommand):
can_import_settings = True
help = 'Load initial data to db'
def handle(self, *args, **options):
load_initial_data()
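
# Run via Django's management interface; the command name comes from this
# module's filename, per Django convention:
#
#   python manage.py load_initial_data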
| [
"[email protected]"
] | |
a47d8415c94513aab2c7019425699a484a4715b5 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/list_child_issues_v4_response.py | 3c616232bd4fb42bfe7dfe5c4fc8ea0de1d33c81 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListChildIssuesV4Response(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'issues': 'list[IssueResponseV4]',
'total': 'int'
}
attribute_map = {
'issues': 'issues',
'total': 'total'
}
def __init__(self, issues=None, total=None):
"""ListChildIssuesV4Response - a model defined in huaweicloud sdk"""
super(ListChildIssuesV4Response, self).__init__()
self._issues = None
self._total = None
self.discriminator = None
if issues is not None:
self.issues = issues
if total is not None:
self.total = total
@property
def issues(self):
"""Gets the issues of this ListChildIssuesV4Response.
        The list of work items
:return: The issues of this ListChildIssuesV4Response.
:rtype: list[IssueResponseV4]
"""
return self._issues
@issues.setter
def issues(self, issues):
"""Sets the issues of this ListChildIssuesV4Response.
        The list of work items
:param issues: The issues of this ListChildIssuesV4Response.
:type: list[IssueResponseV4]
"""
self._issues = issues
@property
def total(self):
"""Gets the total of this ListChildIssuesV4Response.
        The total number of work items
:return: The total of this ListChildIssuesV4Response.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ListChildIssuesV4Response.
        The total number of work items
:param total: The total of this ListChildIssuesV4Response.
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListChildIssuesV4Response):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
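
# Construction/serialization sketch (an empty page is shown; untested):
#
#   resp = ListChildIssuesV4Response(issues=[], total=0)
#   resp.to_dict()   # -> {'issues': [], 'total': 0}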
| [
"[email protected]"
] | |
7b25d3a0b38ecf71f28ab8ade8d455c4f755784e | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/redreader/testcase/firstcases/testcase8_000.py | 487ccbe5525b05345cc0e3f99e97b78ace629218 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,361 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.quantumbadger.redreader',
'appActivity' : 'org.quantumbadger.redreader.activities.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.quantumbadger.redreader/org.quantumbadger.redreader.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase000
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"All Subreddits\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").description(\"Sort Posts\")")
TouchAction(driver).long_press(element).release().perform()
driver.press_keycode(4)
element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").description(\"Sort Posts\")")
TouchAction(driver).long_press(element).release().perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"8_000\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.quantumbadger.redreader'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"[email protected]"
] | |
c74462826047dc31ce78bc75478dd40b6448f120 | ef9cb55b02ababca58ce6880b575120c4f28fdb9 | /blog/models.py | acf0d8f124a21c803016a1554c92811cf188da95 | [] | no_license | mikohan/portfolio | dc6226f3aee73b5af181a0ecc9a13668dde2fe4e | 51fda71a4fecec77ff207eb94e514f6924eaf44e | refs/heads/master | 2020-05-30T09:11:51.866902 | 2019-06-04T05:29:27 | 2019-06-04T05:29:27 | 189,637,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | from django.db import models
class Blog(models.Model):
title = models.CharField(max_length=255)
pub_date = models.DateTimeField()
body = models.TextField()
image = models.ImageField(upload_to='images/')
def __str__(self):
return self.title
def cut(self):
split = self.body.split()
desc = ' '.join(split[:40])
return desc
def pub_date_pretty(self):
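        # '%e' (space-padded day of month) is a glibc extension; on platforms
        # without it (e.g. Windows) '%d' would be the portable choice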
return self.pub_date.strftime('%b %e %Y')
| [
"[email protected]"
] | |
6a6cb216fa9aee4db2182b77261d6d65dfd2fed7 | d7d524d1c0ba1cf62cdbc2f9bf5b9c66fa56726b | /armstrong interval.py | 626c90c1598da113f54258b1c5f678f4dccdac20 | [] | no_license | ramyasutraye/pythonproject | d997ca5ada024e211b6bf087d0d56684daf9df8b | 38975a99eb3ee1ad9e79a9efd538cc992d249fc3 | refs/heads/master | 2020-04-23T19:30:10.128774 | 2018-05-25T06:18:53 | 2018-05-25T06:18:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | a = int(input("Enter lower range: "))
b = int(input("Enter upper range: "))
for num in range(a, b + 1):
order = len(str(num))
sum = 0
temp = num
while temp > 0:
digit = temp % 10
sum += digit ** order
temp //= 10
if num == sum:
print(num)
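
# Worked example: for the interval 100..500 this prints the three-digit
# Armstrong numbers 153, 370, 371 and 407 (e.g. 1**3 + 5**3 + 3**3 == 153).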
| [
"[email protected]"
] | |
6b5991808844bf4bf53bb9ef1f2ba289ed0cbe2d | 6846a0469efc79b89edc8f856944d5a8005d7244 | /id_0123.py | 8f711263e8087edcc8d3178a22f25e1d21fd0249 | [] | no_license | CGenie/project_euler | 42cb966e13645339490046eb44a729660ae0c092 | cc90edd061b0f4d9e076d5a684b842c202a6812a | refs/heads/master | 2020-06-05T00:41:49.266961 | 2014-01-13T19:11:31 | 2014-01-13T19:11:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | #!/usr/bin/python
# #####################################################################
# id_0123.py
#
# Przemyslaw Kaminski <[email protected]>
# Time-stamp: <>
######################################################################
from helper_py3 import memoize
def gen_primes():
lst_primes = [2]
yield 2
p = 3
while True:
prime = True
for x in lst_primes:
if p % x == 0:
prime = False
break
if prime:
lst_primes.append(p)
yield p
p += 2
if __name__ == '__main__':
gp = gen_primes()
M = 10**10
n = 0
while True:
pn = next(gp)
n += 1
if pn**2 >= M:
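            # By the binomial theorem, (p-1)**n + (p+1)**n mod p**2 reduces to
            # 2 for even n and to 2*n*p for odd n; the (-1)**n terms below
            # select between the two cases.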
ret = ((-1)**n + 1 + ((-1)**(n - 1) + 1)*n*pn) % pn**2
if (n + 1) % 100 == 0:
print("pn = " + str(pn) + ", n = " + str(n) + ", ret = " + str(ret))
if ret > M:
print("sol = " + str(n))
break
| [
"[email protected]"
] | |
6a22e8f4dffd272e12fba138916e4c7de47b0cfc | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /src/datadog_api_client/v1/model/geomap_widget_definition.py | a47dba11739a217d6ebe6cc92133fe5fc63bbc9e | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 5,014 | py | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import List, Union, TYPE_CHECKING
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
unset,
UnsetType,
)
if TYPE_CHECKING:
from datadog_api_client.v1.model.widget_custom_link import WidgetCustomLink
from datadog_api_client.v1.model.geomap_widget_request import GeomapWidgetRequest
from datadog_api_client.v1.model.geomap_widget_definition_style import GeomapWidgetDefinitionStyle
from datadog_api_client.v1.model.widget_time import WidgetTime
from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
from datadog_api_client.v1.model.geomap_widget_definition_type import GeomapWidgetDefinitionType
from datadog_api_client.v1.model.geomap_widget_definition_view import GeomapWidgetDefinitionView
class GeomapWidgetDefinition(ModelNormal):
validations = {
"requests": {
"max_items": 1,
"min_items": 1,
},
}
@cached_property
def openapi_types(_):
from datadog_api_client.v1.model.widget_custom_link import WidgetCustomLink
from datadog_api_client.v1.model.geomap_widget_request import GeomapWidgetRequest
from datadog_api_client.v1.model.geomap_widget_definition_style import GeomapWidgetDefinitionStyle
from datadog_api_client.v1.model.widget_time import WidgetTime
from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
from datadog_api_client.v1.model.geomap_widget_definition_type import GeomapWidgetDefinitionType
from datadog_api_client.v1.model.geomap_widget_definition_view import GeomapWidgetDefinitionView
return {
"custom_links": ([WidgetCustomLink],),
"requests": ([GeomapWidgetRequest],),
"style": (GeomapWidgetDefinitionStyle,),
"time": (WidgetTime,),
"title": (str,),
"title_align": (WidgetTextAlign,),
"title_size": (str,),
"type": (GeomapWidgetDefinitionType,),
"view": (GeomapWidgetDefinitionView,),
}
attribute_map = {
"custom_links": "custom_links",
"requests": "requests",
"style": "style",
"time": "time",
"title": "title",
"title_align": "title_align",
"title_size": "title_size",
"type": "type",
"view": "view",
}
def __init__(
self_,
requests: List[GeomapWidgetRequest],
style: GeomapWidgetDefinitionStyle,
type: GeomapWidgetDefinitionType,
view: GeomapWidgetDefinitionView,
custom_links: Union[List[WidgetCustomLink], UnsetType] = unset,
time: Union[WidgetTime, UnsetType] = unset,
title: Union[str, UnsetType] = unset,
title_align: Union[WidgetTextAlign, UnsetType] = unset,
title_size: Union[str, UnsetType] = unset,
**kwargs,
):
"""
This visualization displays a series of values by country on a world map.
:param custom_links: A list of custom links.
:type custom_links: [WidgetCustomLink], optional
:param requests: Array of one request object to display in the widget. The request must contain a ``group-by`` tag whose value is a country ISO code.
See the `Request JSON schema documentation <https://docs.datadoghq.com/dashboards/graphing_json/request_json>`_
for information about building the ``REQUEST_SCHEMA``.
:type requests: [GeomapWidgetRequest]
:param style: The style to apply to the widget.
:type style: GeomapWidgetDefinitionStyle
:param time: Time setting for the widget.
:type time: WidgetTime, optional
:param title: The title of your widget.
:type title: str, optional
:param title_align: How to align the text on the widget.
:type title_align: WidgetTextAlign, optional
:param title_size: The size of the title.
:type title_size: str, optional
:param type: Type of the geomap widget.
:type type: GeomapWidgetDefinitionType
:param view: The view of the world that the map should render.
:type view: GeomapWidgetDefinitionView
"""
if custom_links is not unset:
kwargs["custom_links"] = custom_links
if time is not unset:
kwargs["time"] = time
if title is not unset:
kwargs["title"] = title
if title_align is not unset:
kwargs["title_align"] = title_align
if title_size is not unset:
kwargs["title_size"] = title_size
super().__init__(kwargs)
self_.requests = requests
self_.style = style
self_.type = type
self_.view = view
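        # Construction sketch (palette, query and focus values are
        # illustrative; sub-model constructor signatures are assumed to follow
        # the generated pattern of this client -- untested):
        #
        #   widget = GeomapWidgetDefinition(
        #       requests=[GeomapWidgetRequest(q="avg:system.cpu.user{*} by {country_iso_code}")],
        #       style=GeomapWidgetDefinitionStyle(palette="hostmap_blues", palette_flip=False),
        #       type=GeomapWidgetDefinitionType.GEOMAP,
        #       view=GeomapWidgetDefinitionView(focus="WORLD"),
        #   )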
| [
"[email protected]"
] | |
09b0bfd7ba89b753adde15365a1674b25fb38d71 | 1488955228c48cbaff586e2a8d86249a47645d0d | /app/main/views.py | ec63c98a0f7b5aab3d87e02ab569d663a4452b22 | [] | no_license | vincentouma/watchlist | a286c9d09bb06b18edfa4bc8883e9ec7f302bd01 | 329f90c23e373e14a29f1764cb8958adbbb02279 | refs/heads/master | 2020-06-28T11:00:17.353435 | 2019-08-02T10:19:16 | 2019-08-02T10:19:16 | 198,234,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,956 | py | from flask import render_template,request,redirect,url_for,abort
from . import main
from ..requests import get_movies,get_movie,search_movie
from ..models import Review, User
from .forms import ReviewForm,UpdateProfile
from flask_login import login_required,current_user
from .. import db,photos
import markdown2
#views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
#getting popular movies
popular_movies = get_movies('popular')
upcoming_movie = get_movies('upcoming')
now_showing_movie = get_movies('now_playing')
    title = 'Home - Welcome to The Best Movie Review Website Online'
# message = 'Hello World'
    # use a distinct name so the imported search_movie() helper is not shadowed
    movie_query = request.args.get('movie_query')
    if movie_query:
        return redirect(url_for('.search', movie_name=movie_query))
    else:
        return render_template('index.html', title=title, popular=popular_movies, upcoming=upcoming_movie, now_playing=now_showing_movie)
@main.route('/movie/<int:id>')
def movie(id):
'''
    View movie page function that returns the movie details page and its data
'''
movie = get_movie(id)
title = f'{movie.title}'
reviews = Review.get_reviews(movie.id)
return render_template('movie.html', title = title, movie = movie, reviews = reviews)
@main.route('/search/<movie_name>')
def search(movie_name):
'''
view function to display search results
'''
movie_name_list = movie_name.split(" ")
movie_name_format = "+".join(movie_name_list)
searched_movies = search_movie(movie_name_format)
    title = f'Search results for {movie_name}'
return render_template('search.html', title = title, movies = searched_movies)
@main.route('/movie/review/new/<int:id>', methods = ['GET','POST'])
@login_required
def new_review(id):
form = ReviewForm()
movie = get_movie(id)
if form.validate_on_submit():
title = form.title.data
review = form.review.data
# Updated review instance
new_review = Review(movie_id=movie.id,movie_title=title,image_path=movie.poster,movie_review=review,user=current_user)
# save review method
new_review.save_review()
return redirect(url_for('.movie',id = movie.id ))
title = f'{movie.title} review'
return render_template('new_review.html',title = title, review_form=form, movie=movie)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/review/<int:id>')
def single_review(id):
review=Review.query.get(id)
if review is None:
abort(404)
format_review = markdown2.markdown(review.movie_review,extras=["code-friendly", "fenced-code-blocks"])
return render_template('review.html',review = review,format_review=format_review)
| [
"[email protected]"
] | |
da6cc4d0465295d7dfc8e71959ada0bb8de28a93 | 87706e10023b027bf6b4ef9146242a99c0ebbea2 | /docs/conf.py | 1fe4bd3668497bf382483451a6823a64d9af1fb9 | [
"Unlicense"
] | permissive | Kadantte/anime-downloader | 206dc7b9850d6494135ee143c4069df024e500d0 | 24de83d4ef392e17f39710cc9054ff90e3602533 | refs/heads/master | 2022-09-24T02:16:30.770196 | 2022-09-12T11:12:28 | 2022-09-12T11:12:28 | 168,595,085 | 8 | 0 | Unlicense | 2022-09-12T15:01:57 | 2019-01-31T20:54:19 | Python | UTF-8 | Python | false | false | 5,643 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../anime_downloader'))
# -- Project information -----------------------------------------------------
project = 'anime-downloader'
copyright = '2018, Vishnunarayan K I'
author = 'Vishnunarayan K I'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '3.5.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'anime-downloaderdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'anime-downloader.tex', 'anime-downloader Documentation',
'Vishnunarayan K I', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'anime-downloader', 'anime-downloader Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'anime-downloader', 'anime-downloader Documentation',
author, 'anime-downloader', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"[email protected]"
] | |
a31238640d5eae14cbeb79d20eede97d6bcd4516 | ba0cbdae81c171bd4be7b12c0594de72bd6d625a | /MyToontown/Toontown2016/toontown/parties/DistributedPartyCatchActivity.py | e776ef0b7ed00871035165e64ae831b02d97636a | [] | no_license | sweep41/Toontown-2016 | 65985f198fa32a832e762fa9c59e59606d6a40a3 | 7732fb2c27001264e6dd652c057b3dc41f9c8a7d | refs/heads/master | 2021-01-23T16:04:45.264205 | 2017-06-04T02:47:34 | 2017-06-04T02:47:34 | 93,279,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,123 | py | from pandac.PandaModules import Vec3, Point3, Point4, TextNode, NodePath
from pandac.PandaModules import CollisionHandlerEvent, CollisionNode, CollisionSphere
from direct.distributed.ClockDelta import globalClockDelta
from direct.interval.IntervalGlobal import Sequence, Parallel
from direct.interval.IntervalGlobal import LerpScaleInterval, LerpFunctionInterval, LerpColorScaleInterval, LerpPosInterval
from direct.interval.IntervalGlobal import SoundInterval, WaitInterval
from direct.showbase.PythonUtil import Functor, bound, lerp, SerialNumGen
from direct.showbase.RandomNumGen import RandomNumGen
from direct.task.Task import Task
from direct.distributed import DistributedSmoothNode
from direct.directnotify import DirectNotifyGlobal
from direct.interval.FunctionInterval import Wait, Func
from toontown.toonbase import TTLocalizer
from toontown.toon import Toon
from toontown.toonbase import ToontownGlobals
from toontown.minigame.Trajectory import Trajectory
from toontown.minigame.OrthoDrive import OrthoDrive
from toontown.minigame.OrthoWalk import OrthoWalk
from toontown.minigame.DropPlacer import PartyRegionDropPlacer
from toontown.parties import PartyGlobals
from toontown.parties.PartyCatchActivityToonSD import PartyCatchActivityToonSD
from toontown.parties.DistributedPartyActivity import DistributedPartyActivity
from toontown.parties.DistributedPartyCatchActivityBase import DistributedPartyCatchActivityBase
from toontown.parties.DistributedPartyCannonActivity import DistributedPartyCannonActivity
from toontown.parties.activityFSMs import CatchActivityFSM
class DistributedPartyCatchActivity(DistributedPartyActivity, DistributedPartyCatchActivityBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPartyCatchActivity')
DropTaskName = 'dropSomething'
DropObjectPlurals = {'apple': TTLocalizer.PartyCatchActivityApples,
'orange': TTLocalizer.PartyCatchActivityOranges,
'pear': TTLocalizer.PartyCatchActivityPears,
'coconut': TTLocalizer.PartyCatchActivityCoconuts,
'watermelon': TTLocalizer.PartyCatchActivityWatermelons,
'pineapple': TTLocalizer.PartyCatchActivityPineapples,
'anvil': TTLocalizer.PartyCatchActivityAnvils}
class Generation:
def __init__(self, generation, startTime, startNetworkTime, numPlayers):
self.generation = generation
self.startTime = startTime
self.startNetworkTime = startNetworkTime
self.numPlayers = numPlayers
self.hasBeenScheduled = False
self.droppedObjNames = []
self.dropSchedule = []
self.numItemsDropped = 0
self.droppedObjCaught = {}
def __init__(self, cr):
DistributedPartyActivity.__init__(self, cr, PartyGlobals.ActivityIds.PartyCatch, PartyGlobals.ActivityTypes.HostInitiated, wantRewardGui=True)
self.setUsesSmoothing()
self.setUsesLookAround()
self._sNumGen = SerialNumGen()
def getTitle(self):
return TTLocalizer.PartyCatchActivityTitle
def getInstructions(self):
return TTLocalizer.PartyCatchActivityInstructions % {'badThing': self.DropObjectPlurals['anvil']}
def generate(self):
DistributedPartyActivity.generate(self)
self.notify.info('localAvatar doId: %s' % base.localAvatar.doId)
self.notify.info('generate()')
self._generateFrame = globalClock.getFrameCount()
self._id2gen = {}
self._orderedGenerations = []
self._orderedGenerationIndex = None
rng = RandomNumGen(self.doId)
self._generationSeedBase = rng.randrange(1000)
self._lastDropTime = 0.0
return
def getCurGeneration(self):
if self._orderedGenerationIndex is None:
return
return self._orderedGenerations[self._orderedGenerationIndex]
def _addGeneration(self, generation, startTime, startNetworkTime, numPlayers):
self._id2gen[generation] = self.Generation(generation, startTime, startNetworkTime, numPlayers)
i = 0
while 1:
if i >= len(self._orderedGenerations):
break
gen = self._orderedGenerations[i]
startNetT = self._id2gen[gen].startTime
genId = self._id2gen[gen].generation
if startNetT > startNetworkTime:
break
if startNetT == startNetworkTime and genId > generation:
break
i += 1
self._orderedGenerations = self._orderedGenerations[:i] + [generation] + self._orderedGenerations[i:]
if self._orderedGenerationIndex is not None:
if self._orderedGenerationIndex >= i:
self._orderedGenerationIndex += 1
def _removeGeneration(self, generation):
del self._id2gen[generation]
i = self._orderedGenerations.index(generation)
self._orderedGenerations = self._orderedGenerations[:i] + self._orderedGenerations[i + 1:]
if self._orderedGenerationIndex is not None:
if len(self._orderedGenerations):
if self._orderedGenerationIndex >= i:
self._orderedGenerationIndex -= 1
else:
self._orderedGenerationIndex = None
return
def announceGenerate(self):
self.notify.info('announceGenerate()')
self.catchTreeZoneEvent = 'fence_floor'
DistributedPartyActivity.announceGenerate(self)
def load(self, loadModels = 1, arenaModel = 'partyCatchTree'):
self.notify.info('load()')
DistributedPartyCatchActivity.notify.debug('PartyCatch: load')
self.activityFSM = CatchActivityFSM(self)
if __dev__:
for o in xrange(3):
print {0: 'SPOTS PER PLAYER',
1: 'DROPS PER MINUTE PER SPOT DURING NORMAL DROP PERIOD',
2: 'DROPS PER MINUTE PER PLAYER DURING NORMAL DROP PERIOD'}[o]
for i in xrange(1, self.FallRateCap_Players + 10):
self.defineConstants(forceNumPlayers=i)
numDropLocations = self.DropRows * self.DropColumns
numDropsPerMin = 60.0 / self.DropPeriod
if o == 0:
spotsPerPlayer = numDropLocations / float(i)
print '%2d PLAYERS: %s' % (i, spotsPerPlayer)
elif o == 1:
numDropsPerMinPerSpot = numDropsPerMin / numDropLocations
print '%2d PLAYERS: %s' % (i, numDropsPerMinPerSpot)
elif i > 0:
numDropsPerMinPerPlayer = numDropsPerMin / i
print '%2d PLAYERS: %s' % (i, numDropsPerMinPerPlayer)
self.defineConstants()
self.treesAndFence = loader.loadModel('phase_13/models/parties/%s' % arenaModel)
self.treesAndFence.setScale(0.9)
self.treesAndFence.find('**/fence_floor').setPos(0.0, 0.0, 0.1)
self.treesAndFence.reparentTo(self.root)
ground = self.treesAndFence.find('**/groundPlane')
ground.setBin('ground', 1)
DistributedPartyActivity.load(self)
exitText = TextNode('PartyCatchExitText')
exitText.setCardAsMargin(0.1, 0.1, 0.1, 0.1)
exitText.setCardDecal(True)
exitText.setCardColor(1.0, 1.0, 1.0, 0.0)
exitText.setText(TTLocalizer.PartyCatchActivityExit)
exitText.setTextColor(0.0, 8.0, 0.0, 0.9)
exitText.setAlign(exitText.ACenter)
exitText.setFont(ToontownGlobals.getBuildingNametagFont())
exitText.setShadowColor(0, 0, 0, 1)
exitText.setBin('fixed')
if TTLocalizer.BuildingNametagShadow:
exitText.setShadow(*TTLocalizer.BuildingNametagShadow)
exitTextLoc = self.treesAndFence.find('**/loc_exitSignText')
exitTextNp = exitTextLoc.attachNewNode(exitText)
exitTextNp.setDepthWrite(0)
exitTextNp.setScale(4)
exitTextNp.setZ(-.5)
self.sign.reparentTo(self.treesAndFence.find('**/loc_eventSign'))
self.sign.wrtReparentTo(self.root)
self.avatarNodePath = NodePath('PartyCatchAvatarNodePath')
self.avatarNodePath.reparentTo(self.root)
self._avatarNodePathParentToken = 3
base.cr.parentMgr.registerParent(self._avatarNodePathParentToken, self.avatarNodePath)
self.toonSDs = {}
self.dropShadow = loader.loadModelOnce('phase_3/models/props/drop_shadow')
self.dropObjModels = {}
if loadModels:
self.__loadDropModels()
self.sndGoodCatch = base.loader.loadSfx('phase_4/audio/sfx/SZ_DD_treasure.ogg')
self.sndOof = base.loader.loadSfx('phase_4/audio/sfx/MG_cannon_hit_dirt.ogg')
self.sndAnvilLand = base.loader.loadSfx('phase_4/audio/sfx/AA_drop_anvil_miss.ogg')
self.sndPerfect = base.loader.loadSfx('phase_4/audio/sfx/ring_perfect.ogg')
self.__textGen = TextNode('partyCatchActivity')
self.__textGen.setFont(ToontownGlobals.getSignFont())
self.__textGen.setAlign(TextNode.ACenter)
self.activityFSM.request('Idle')
def __loadDropModels(self):
for objType in PartyGlobals.DropObjectTypes:
model = loader.loadModel(objType.modelPath)
self.dropObjModels[objType.name] = model
modelScales = {'apple': 0.7,
'orange': 0.7,
'pear': 0.5,
'coconut': 0.7,
'watermelon': 0.6,
'pineapple': 0.45}
if modelScales.has_key(objType.name):
model.setScale(modelScales[objType.name])
if objType == PartyGlobals.Name2DropObjectType['pear']:
model.setZ(-.6)
if objType == PartyGlobals.Name2DropObjectType['coconut']:
model.setP(180)
if objType == PartyGlobals.Name2DropObjectType['watermelon']:
model.setH(135)
model.setZ(-.5)
if objType == PartyGlobals.Name2DropObjectType['pineapple']:
model.setZ(-1.7)
if objType == PartyGlobals.Name2DropObjectType['anvil']:
model.setZ(-self.ObjRadius)
model.flattenStrong()
def unload(self):
DistributedPartyCatchActivity.notify.debug('unload')
self.finishAllDropIntervals()
self.destroyOrthoWalk()
DistributedPartyActivity.unload(self)
self.stopDropTask()
del self.activityFSM
del self.__textGen
for avId in self.toonSDs.keys():
if self.toonSDs.has_key(avId):
toonSD = self.toonSDs[avId]
toonSD.unload()
del self.toonSDs
self.treesAndFence.removeNode()
del self.treesAndFence
self.dropShadow.removeNode()
del self.dropShadow
base.cr.parentMgr.unregisterParent(self._avatarNodePathParentToken)
for model in self.dropObjModels.values():
model.removeNode()
del self.dropObjModels
del self.sndGoodCatch
del self.sndOof
del self.sndAnvilLand
del self.sndPerfect
def setStartTimestamp(self, timestamp32):
self.notify.info('setStartTimestamp(%s)' % (timestamp32,))
self._startTimestamp = globalClockDelta.networkToLocalTime(timestamp32, bits=32)
def getCurrentCatchActivityTime(self):
return globalClock.getFrameTime() - self._startTimestamp
def getObjModel(self, objName):
return self.dropObjModels[objName].copyTo(hidden)
def joinRequestDenied(self, reason):
DistributedPartyActivity.joinRequestDenied(self, reason)
base.cr.playGame.getPlace().fsm.request('walk')
def handleToonJoined(self, toonId):
if not self.toonSDs.has_key(toonId):
toonSD = PartyCatchActivityToonSD(toonId, self)
self.toonSDs[toonId] = toonSD
toonSD.load()
self.notify.debug('handleToonJoined : currentState = %s' % self.activityFSM.state)
self.cr.doId2do[toonId].useLOD(500)
if self.activityFSM.state == 'Active':
if self.toonSDs.has_key(toonId):
self.toonSDs[toonId].enter()
if base.localAvatar.doId == toonId:
base.localAvatar.b_setParent(self._avatarNodePathParentToken)
self.putLocalAvatarInActivity()
if self.toonSDs.has_key(toonId):
self.toonSDs[toonId].fsm.request('rules')
def handleToonExited(self, toonId):
self.notify.debug('handleToonExited( toonId=%s )' % toonId)
if self.cr.doId2do.has_key(toonId):
self.cr.doId2do[toonId].resetLOD()
if self.toonSDs.has_key(toonId):
self.toonSDs[toonId].fsm.request('notPlaying')
self.toonSDs[toonId].exit()
self.toonSDs[toonId].unload()
del self.toonSDs[toonId]
if base.localAvatar.doId == toonId:
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
def takeLocalAvatarOutOfActivity(self):
self.notify.debug('localToon has left the circle')
camera.reparentTo(base.localAvatar)
base.localAvatar.startUpdateSmartCamera()
base.localAvatar.enableSmartCameraViews()
base.localAvatar.setCameraPositionByIndex(base.localAvatar.cameraIndex)
DistributedSmoothNode.activateSmoothing(1, 0)
def _enableCollisions(self):
DistributedPartyActivity._enableCollisions(self)
self._enteredTree = False
self.accept('enter' + self.catchTreeZoneEvent, self._toonMayHaveEnteredTree)
self.accept('again' + self.catchTreeZoneEvent, self._toonMayHaveEnteredTree)
self.accept('exit' + self.catchTreeZoneEvent, self._toonExitedTree)
self.accept(DistributedPartyCannonActivity.LOCAL_TOON_LANDED_EVENT, self._handleCannonLanded)
def _disableCollisions(self):
self.ignore(DistributedPartyCannonActivity.LOCAL_TOON_LANDED_EVENT)
self.ignore('enter' + self.catchTreeZoneEvent)
self.ignore('again' + self.catchTreeZoneEvent)
self.ignore('exit' + self.catchTreeZoneEvent)
DistributedPartyActivity._disableCollisions(self)
def _handleCannonLanded(self):
x = base.localAvatar.getX()
y = base.localAvatar.getY()
if x > self.x - self.StageHalfWidth and x < self.x + self.StageHalfWidth and y > self.y - self.StageHalfHeight and y < self.y + self.StageHalfHeight:
self._toonEnteredTree(None)
return
def _toonMayHaveEnteredTree(self, collEntry):
if self._enteredTree:
return
if base.localAvatar.controlManager.currentControls.getIsAirborne():
return
self._toonEnteredTree(collEntry)
def _toonEnteredTree(self, collEntry):
self.notify.debug('_toonEnteredTree : avid = %s' % base.localAvatar.doId)
self.notify.debug('_toonEnteredTree : currentState = %s' % self.activityFSM.state)
if self.isLocalToonInActivity():
return
if self.activityFSM.state == 'Active':
base.cr.playGame.getPlace().fsm.request('activity')
self.d_toonJoinRequest()
elif self.activityFSM.state == 'Idle':
base.cr.playGame.getPlace().fsm.request('activity')
self.d_toonJoinRequest()
self._enteredTree = True
def _toonExitedTree(self, collEntry):
self.notify.debug('_toonExitedTree : avid = %s' % base.localAvatar.doId)
self._enteredTree = False
if hasattr(base.cr.playGame.getPlace(), 'fsm') and self.activityFSM.state == 'Active' and self.isLocalToonInActivity():
if self.toonSDs.has_key(base.localAvatar.doId):
self.takeLocalAvatarOutOfActivity()
self.toonSDs[base.localAvatar.doId].fsm.request('notPlaying')
self.d_toonExitDemand()
def setToonsPlaying(self, toonIds):
self.notify.info('setToonsPlaying(%s)' % (toonIds,))
DistributedPartyActivity.setToonsPlaying(self, toonIds)
if self.isLocalToonInActivity() and base.localAvatar.doId not in toonIds:
if self.toonSDs.has_key(base.localAvatar.doId):
self.takeLocalAvatarOutOfActivity()
self.toonSDs[base.localAvatar.doId].fsm.request('notPlaying')
def __genText(self, text):
self.__textGen.setText(text)
return self.__textGen.generate()
def getNumPlayers(self):
return len(self.toonIds)
def defineConstants(self, forceNumPlayers = None):
DistributedPartyCatchActivity.notify.debug('defineConstants')
self.ShowObjSpheres = 0
self.ShowToonSpheres = 0
self.useGravity = True
self.trickShadows = True
if forceNumPlayers is None:
numPlayers = self.getNumPlayers()
else:
numPlayers = forceNumPlayers
self.calcDifficultyConstants(numPlayers)
DistributedPartyCatchActivity.notify.debug('ToonSpeed: %s' % self.ToonSpeed)
DistributedPartyCatchActivity.notify.debug('total drops: %s' % self.totalDrops)
DistributedPartyCatchActivity.notify.debug('numFruits: %s' % self.numFruits)
DistributedPartyCatchActivity.notify.debug('numAnvils: %s' % self.numAnvils)
self.ObjRadius = 1.0
dropRegionTable = PartyRegionDropPlacer.getDropRegionTable(numPlayers)
self.DropRows, self.DropColumns = len(dropRegionTable), len(dropRegionTable[0])
for objType in PartyGlobals.DropObjectTypes:
DistributedPartyCatchActivity.notify.debug('*** Object Type: %s' % objType.name)
objType.onscreenDuration = objType.onscreenDurMult * self.BaselineOnscreenDropDuration
DistributedPartyCatchActivity.notify.debug('onscreenDuration=%s' % objType.onscreenDuration)
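            # kinematics: solve x = x_0 + v_0*t + g*t^2/2 for g, so the object
            # falls from MinOffscreenHeight to z = 0 in exactly onscreenDuration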
v_0 = 0.0
t = objType.onscreenDuration
x_0 = self.MinOffscreenHeight
x = 0.0
g = 2.0 * (x - x_0 - v_0 * t) / (t * t)
DistributedPartyCatchActivity.notify.debug('gravity=%s' % g)
objType.trajectory = Trajectory(0, Vec3(0, 0, x_0), Vec3(0, 0, v_0), gravMult=abs(g / Trajectory.gravity))
objType.fallDuration = objType.onscreenDuration + self.OffscreenTime
return
def grid2world(self, column, row):
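        # map grid cell (column, row) to world space: normalize to [0, 1],
        # remap to [-1, 1], then scale by the stage half-extents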
x = column / float(self.DropColumns - 1)
y = row / float(self.DropRows - 1)
x = x * 2.0 - 1.0
y = y * 2.0 - 1.0
x *= self.StageHalfWidth
y *= self.StageHalfHeight
return (x, y)
def showPosts(self):
self.hidePosts()
self.posts = [Toon.Toon(),
Toon.Toon(),
Toon.Toon(),
Toon.Toon()]
for i in xrange(len(self.posts)):
tree = self.posts[i]
tree.reparentTo(render)
x = self.StageHalfWidth
y = self.StageHalfHeight
if i > 1:
x = -x
if i % 2:
y = -y
tree.setPos(x + self.x, y + self.y, 0)
def hidePosts(self):
if hasattr(self, 'posts'):
for tree in self.posts:
tree.removeNode()
del self.posts
def showDropGrid(self):
self.hideDropGrid()
self.dropMarkers = []
for row in xrange(self.DropRows):
self.dropMarkers.append([])
rowList = self.dropMarkers[row]
for column in xrange(self.DropColumns):
toon = Toon.Toon()
toon.setDNA(base.localAvatar.getStyle())
toon.reparentTo(self.root)
toon.setScale(1.0 / 3)
x, y = self.grid2world(column, row)
toon.setPos(x, y, 0)
rowList.append(toon)
def hideDropGrid(self):
if hasattr(self, 'dropMarkers'):
for row in self.dropMarkers:
for marker in row:
marker.removeNode()
del self.dropMarkers
def handleToonDisabled(self, avId):
DistributedPartyCatchActivity.notify.debug('handleToonDisabled')
DistributedPartyCatchActivity.notify.debug('avatar ' + str(avId) + ' disabled')
if self.toonSDs.has_key(avId):
self.toonSDs[avId].exit(unexpectedExit=True)
del self.toonSDs[avId]
def turnOffSmoothingOnGuests(self):
pass
def setState(self, newState, timestamp):
self.notify.info('setState(%s, %s)' % (newState, timestamp))
DistributedPartyCatchActivity.notify.debug('setState( newState=%s, ... )' % newState)
DistributedPartyActivity.setState(self, newState, timestamp)
self.activityFSM.request(newState)
if newState == 'Active':
if base.localAvatar.doId != self.party.partyInfo.hostId:
if globalClock.getFrameCount() > self._generateFrame:
if base.localAvatar.getX() > self.x - self.StageHalfWidth and base.localAvatar.getX() < self.x + self.StageHalfWidth and base.localAvatar.getY() > self.y - self.StageHalfHeight and base.localAvatar.getY() < self.y + self.StageHalfHeight:
self._toonEnteredTree(None)
return
def putLocalAvatarInActivity(self):
if base.cr.playGame.getPlace() and hasattr(base.cr.playGame.getPlace(), 'fsm'):
base.cr.playGame.getPlace().fsm.request('activity', [False])
else:
self.notify.info("Avoided crash: toontown.parties.DistributedPartyCatchActivity:632, toontown.parties.DistributedPartyCatchActivity:1198, toontown.parties.activityFSMMixins:49, direct.fsm.FSM:423, AttributeError: 'NoneType' object has no attribute 'fsm'")
base.localAvatar.stopUpdateSmartCamera()
camera.reparentTo(self.treesAndFence)
camera.setPosHpr(0.0, -63.0, 30.0, 0.0, -20.0, 0.0)
if not hasattr(self, 'ltLegsCollNode'):
self.createCatchCollisions()
def createCatchCollisions(self):
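        # attach collision spheres to the local toon's legs, head and hands so
        # falling objects can register catches via the 'ltCatch%in' event pattern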
radius = 0.7
handler = CollisionHandlerEvent()
handler.setInPattern('ltCatch%in')
self.ltLegsCollNode = CollisionNode('catchLegsCollNode')
self.ltLegsCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
self.ltHeadCollNode = CollisionNode('catchHeadCollNode')
self.ltHeadCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
self.ltLHandCollNode = CollisionNode('catchLHandCollNode')
self.ltLHandCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
self.ltRHandCollNode = CollisionNode('catchRHandCollNode')
self.ltRHandCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
legsCollNodepath = base.localAvatar.attachNewNode(self.ltLegsCollNode)
legsCollNodepath.hide()
head = base.localAvatar.getHeadParts().getPath(2)
headCollNodepath = head.attachNewNode(self.ltHeadCollNode)
headCollNodepath.hide()
lHand = base.localAvatar.getLeftHands()[0]
lHandCollNodepath = lHand.attachNewNode(self.ltLHandCollNode)
lHandCollNodepath.hide()
rHand = base.localAvatar.getRightHands()[0]
rHandCollNodepath = rHand.attachNewNode(self.ltRHandCollNode)
rHandCollNodepath.hide()
base.localAvatar.cTrav.addCollider(legsCollNodepath, handler)
base.localAvatar.cTrav.addCollider(headCollNodepath, handler)
base.localAvatar.cTrav.addCollider(lHandCollNodepath, handler)
        base.localAvatar.cTrav.addCollider(rHandCollNodepath, handler)
if self.ShowToonSpheres:
legsCollNodepath.show()
headCollNodepath.show()
lHandCollNodepath.show()
rHandCollNodepath.show()
self.ltLegsCollNode.addSolid(CollisionSphere(0, 0, radius, radius))
self.ltHeadCollNode.addSolid(CollisionSphere(0, 0, 0, radius))
self.ltLHandCollNode.addSolid(CollisionSphere(0, 0, 0, 2 * radius / 3.0))
self.ltRHandCollNode.addSolid(CollisionSphere(0, 0, 0, 2 * radius / 3.0))
self.toonCollNodes = [legsCollNodepath,
headCollNodepath,
lHandCollNodepath,
rHandCollNodepath]
def destroyCatchCollisions(self):
if not hasattr(self, 'ltLegsCollNode'):
return
for collNode in self.toonCollNodes:
while collNode.node().getNumSolids():
collNode.node().removeSolid(0)
base.localAvatar.cTrav.removeCollider(collNode)
del self.toonCollNodes
del self.ltLegsCollNode
del self.ltHeadCollNode
del self.ltLHandCollNode
del self.ltRHandCollNode
def timerExpired(self):
pass
def __handleCatch(self, generation, objNum):
DistributedPartyCatchActivity.notify.debug('catch: %s' % [generation, objNum])
if base.localAvatar.doId not in self.toonIds:
return
self.showCatch(base.localAvatar.doId, generation, objNum)
objName = self._id2gen[generation].droppedObjNames[objNum]
objTypeId = PartyGlobals.Name2DOTypeId[objName]
self.sendUpdate('claimCatch', [generation, objNum, objTypeId])
self.finishDropInterval(generation, objNum)
def showCatch(self, avId, generation, objNum):
if not self.toonSDs.has_key(avId):
return
isLocal = avId == base.localAvatar.doId
if generation not in self._id2gen:
return
if not self._id2gen[generation].hasBeenScheduled:
return
objName = self._id2gen[generation].droppedObjNames[objNum]
objType = PartyGlobals.Name2DropObjectType[objName]
if objType.good:
if not self._id2gen[generation].droppedObjCaught.has_key(objNum):
if isLocal:
base.playSfx(self.sndGoodCatch)
fruit = self.getObjModel(objName)
toon = self.getAvatar(avId)
rHand = toon.getRightHands()[1]
self.toonSDs[avId].eatFruit(fruit, rHand)
else:
self.toonSDs[avId].fsm.request('fallForward')
self._id2gen[generation].droppedObjCaught[objNum] = 1
def setObjectCaught(self, avId, generation, objNum):
self.notify.info('setObjectCaught(%s, %s, %s)' % (avId, generation, objNum))
if self.activityFSM.state != 'Active':
DistributedPartyCatchActivity.notify.warning('ignoring msg: object %s caught by %s' % (objNum, avId))
return
isLocal = avId == base.localAvatar.doId
if not isLocal:
DistributedPartyCatchActivity.notify.debug('AI: avatar %s caught %s' % (avId, objNum))
self.finishDropInterval(generation, objNum)
self.showCatch(avId, generation, objNum)
self._scheduleGenerations()
gen = self._id2gen[generation]
if gen.hasBeenScheduled:
objName = gen.droppedObjNames[objNum]
if PartyGlobals.Name2DropObjectType[objName].good:
if hasattr(self, 'fruitsCaught'):
self.fruitsCaught += 1
def finishDropInterval(self, generation, objNum):
if hasattr(self, 'dropIntervals'):
if self.dropIntervals.has_key((generation, objNum)):
self.dropIntervals[generation, objNum].finish()
def finishAllDropIntervals(self):
if hasattr(self, 'dropIntervals'):
for dropInterval in self.dropIntervals.values():
dropInterval.finish()
def setGenerations(self, generations):
self.notify.info('setGenerations(%s)' % (generations,))
gen2t = {}
gen2nt = {}
gen2np = {}
for id, timestamp32, numPlayers in generations:
gen2t[id] = globalClockDelta.networkToLocalTime(timestamp32, bits=32) - self._startTimestamp
gen2nt[id] = timestamp32
gen2np[id] = numPlayers
ids = self._id2gen.keys()
for id in ids:
if id not in gen2t:
self._removeGeneration(id)
for id in gen2t:
if id not in self._id2gen:
self._addGeneration(id, gen2t[id], gen2nt[id], gen2np[id])
def scheduleDrops(self, genId = None):
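        # the drop schedule is derived deterministically from the generation id
        # (RandomNumGen(genId + seed)), so every client computes identical drops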
if genId is None:
genId = self.getCurGeneration()
gen = self._id2gen[genId]
if gen.hasBeenScheduled:
return
fruitIndex = int((gen.startTime + 0.5 * self.DropPeriod) / PartyGlobals.CatchActivityDuration)
fruitNames = ['apple',
'orange',
'pear',
'coconut',
'watermelon',
'pineapple']
fruitName = fruitNames[fruitIndex % len(fruitNames)]
rng = RandomNumGen(genId + self._generationSeedBase)
gen.droppedObjNames = [fruitName] * self.numFruits + ['anvil'] * self.numAnvils
rng.shuffle(gen.droppedObjNames)
dropPlacer = PartyRegionDropPlacer(self, gen.numPlayers, genId, gen.droppedObjNames, startTime=gen.startTime)
gen.numItemsDropped = 0
tIndex = gen.startTime % PartyGlobals.CatchActivityDuration
tPercent = float(tIndex) / PartyGlobals.CatchActivityDuration
gen.numItemsDropped += dropPlacer.skipPercent(tPercent)
while not dropPlacer.doneDropping(continuous=True):
nextDrop = dropPlacer.getNextDrop()
gen.dropSchedule.append(nextDrop)
gen.hasBeenScheduled = True
return
def startDropTask(self):
taskMgr.add(self.dropTask, self.DropTaskName)
def stopDropTask(self):
taskMgr.remove(self.DropTaskName)
def _scheduleGenerations(self):
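        # walk forward through the generations in start-time order and schedule
        # drops for any generation whose start time has already passed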
curT = self.getCurrentCatchActivityTime()
genIndex = self._orderedGenerationIndex
newGenIndex = genIndex
while genIndex is None or genIndex < len(self._orderedGenerations) - 1:
if genIndex is None:
nextGenIndex = 0
else:
nextGenIndex = genIndex + 1
nextGenId = self._orderedGenerations[nextGenIndex]
nextGen = self._id2gen[nextGenId]
startT = nextGen.startTime
if curT >= startT:
newGenIndex = nextGenIndex
if not nextGen.hasBeenScheduled:
self.defineConstants(forceNumPlayers=nextGen.numPlayers)
self.scheduleDrops(genId=self._orderedGenerations[nextGenIndex])
genIndex = nextGenIndex
self._orderedGenerationIndex = newGenIndex
return
def dropTask(self, task):
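        # per-frame task: pop each drop whose scheduled time has passed and start
        # its interval, offset by the elapsed time so late starts stay in sync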
self._scheduleGenerations()
curT = self.getCurrentCatchActivityTime()
if self._orderedGenerationIndex is not None:
i = self._orderedGenerationIndex
genIndex = self._orderedGenerations[i]
gen = self._id2gen[genIndex]
while len(gen.dropSchedule) > 0 and gen.dropSchedule[0][0] < curT:
drop = gen.dropSchedule[0]
gen.dropSchedule = gen.dropSchedule[1:]
dropTime, objName, dropCoords = drop
objNum = gen.numItemsDropped
x, y = self.grid2world(*dropCoords)
dropIval = self.getDropIval(x, y, objName, genIndex, objNum)
def cleanup(generation, objNum, self = self):
del self.dropIntervals[generation, objNum]
dropIval.append(Func(Functor(cleanup, genIndex, objNum)))
self.dropIntervals[genIndex, objNum] = dropIval
gen.numItemsDropped += 1
dropIval.start(curT - dropTime)
self._lastDropTime = dropTime
return Task.cont
def getDropIval(self, x, y, dropObjName, generation, num):
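        # build the interval for one falling object: parent it to the stage,
        # wait out the offscreen time, animate the fall and shadow, then clean up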
objType = PartyGlobals.Name2DropObjectType[dropObjName]
id = (generation, num)
dropNode = hidden.attachNewNode('catchDropNode%s' % (id,))
dropNode.setPos(x, y, 0)
shadow = self.dropShadow.copyTo(dropNode)
shadow.setZ(PartyGlobals.CatchDropShadowHeight)
shadow.setColor(1, 1, 1, 1)
object = self.getObjModel(dropObjName)
object.reparentTo(hidden)
if dropObjName in ['watermelon', 'anvil']:
objH = object.getH()
absDelta = {'watermelon': 12,
'anvil': 15}[dropObjName]
delta = (self.randomNumGen.random() * 2.0 - 1.0) * absDelta
newH = objH + delta
else:
newH = self.randomNumGen.random() * 360.0
object.setH(newH)
sphereName = 'FallObj%s' % (id,)
radius = self.ObjRadius
if objType.good:
radius *= lerp(1.0, 1.3, 0.5)
collSphere = CollisionSphere(0, 0, 0, radius)
collSphere.setTangible(0)
collNode = CollisionNode(sphereName)
collNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
collNode.addSolid(collSphere)
collNodePath = object.attachNewNode(collNode)
collNodePath.hide()
if self.ShowObjSpheres:
collNodePath.show()
catchEventName = 'ltCatch' + sphereName
def eatCollEntry(forward, collEntry):
forward()
self.accept(catchEventName, Functor(eatCollEntry, Functor(self.__handleCatch, id[0], id[1])))
def cleanup(self = self, dropNode = dropNode, id = id, event = catchEventName):
self.ignore(event)
dropNode.removeNode()
duration = objType.fallDuration
onscreenDuration = objType.onscreenDuration
targetShadowScale = 0.3
if self.trickShadows:
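            # scale the shadow up in two phases: a small amount while the object
            # is still offscreen, then the rest during the visible fall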
intermedScale = targetShadowScale * (self.OffscreenTime / self.BaselineDropDuration)
shadowScaleIval = Sequence(LerpScaleInterval(shadow, self.OffscreenTime, intermedScale, startScale=0))
shadowScaleIval.append(LerpScaleInterval(shadow, duration - self.OffscreenTime, targetShadowScale, startScale=intermedScale))
else:
shadowScaleIval = LerpScaleInterval(shadow, duration, targetShadowScale, startScale=0)
targetShadowAlpha = 0.4
shadowAlphaIval = LerpColorScaleInterval(shadow, self.OffscreenTime, Point4(1, 1, 1, targetShadowAlpha), startColorScale=Point4(1, 1, 1, 0))
shadowIval = Parallel(shadowScaleIval, shadowAlphaIval)
if self.useGravity:
def setObjPos(t, objType = objType, object = object):
z = objType.trajectory.calcZ(t)
object.setZ(z)
setObjPos(0)
dropIval = LerpFunctionInterval(setObjPos, fromData=0, toData=onscreenDuration, duration=onscreenDuration)
else:
startPos = Point3(0, 0, self.MinOffscreenHeight)
object.setPos(startPos)
dropIval = LerpPosInterval(object, onscreenDuration, Point3(0, 0, 0), startPos=startPos, blendType='easeIn')
ival = Sequence(Func(Functor(dropNode.reparentTo, self.root)), Parallel(Sequence(WaitInterval(self.OffscreenTime), Func(Functor(object.reparentTo, dropNode)), dropIval), shadowIval), Func(cleanup), name='drop%s' % (id,))
if objType == PartyGlobals.Name2DropObjectType['anvil']:
ival.append(Func(self.playAnvil))
return ival
def playAnvil(self):
if base.localAvatar.doId in self.toonIds:
base.playSfx(self.sndAnvilLand)
def initOrthoWalk(self):
DistributedPartyCatchActivity.notify.debug('startOrthoWalk')
def doCollisions(oldPos, newPos, self = self):
x = bound(newPos[0], self.StageHalfWidth, -self.StageHalfWidth)
y = bound(newPos[1], self.StageHalfHeight, -self.StageHalfHeight)
newPos.setX(x)
newPos.setY(y)
return newPos
orthoDrive = OrthoDrive(self.ToonSpeed, instantTurn=True)
self.orthoWalk = OrthoWalk(orthoDrive, broadcast=True)
def destroyOrthoWalk(self):
DistributedPartyCatchActivity.notify.debug('destroyOrthoWalk')
if hasattr(self, 'orthoWalk'):
self.orthoWalk.stop()
self.orthoWalk.destroy()
del self.orthoWalk
def startIdle(self):
DistributedPartyCatchActivity.notify.debug('startIdle')
def finishIdle(self):
DistributedPartyCatchActivity.notify.debug('finishIdle')
def startActive(self):
DistributedPartyCatchActivity.notify.debug('startActive')
for avId in self.toonIds:
if self.toonSDs.has_key(avId):
toonSD = self.toonSDs[avId]
toonSD.enter()
toonSD.fsm.request('normal')
self.fruitsCaught = 0
self.dropIntervals = {}
self.startDropTask()
if base.localAvatar.doId in self.toonIds:
self.putLocalAvatarInActivity()
def finishActive(self):
DistributedPartyCatchActivity.notify.debug('finishActive')
self.stopDropTask()
if hasattr(self, 'finishIval'):
self.finishIval.pause()
del self.finishIval
if base.localAvatar.doId in self.toonIds:
self.takeLocalAvatarOutOfActivity()
for ival in self.dropIntervals.values():
ival.finish()
del self.dropIntervals
def startConclusion(self):
DistributedPartyCatchActivity.notify.debug('startConclusion')
for avId in self.toonIds:
if self.toonSDs.has_key(avId):
toonSD = self.toonSDs[avId]
toonSD.fsm.request('notPlaying')
self.destroyCatchCollisions()
if base.localAvatar.doId not in self.toonIds:
return
else:
self.localToonExiting()
if self.fruitsCaught >= self.numFruits:
finishText = TTLocalizer.PartyCatchActivityFinishPerfect
else:
finishText = TTLocalizer.PartyCatchActivityFinish
perfectTextSubnode = hidden.attachNewNode(self.__genText(finishText))
perfectText = hidden.attachNewNode('perfectText')
perfectTextSubnode.reparentTo(perfectText)
frame = self.__textGen.getCardActual()
offsetY = -abs(frame[2] + frame[3]) / 2.0
perfectTextSubnode.setPos(0, 0, offsetY)
perfectText.setColor(1, 0.1, 0.1, 1)
def fadeFunc(t, text = perfectText):
text.setColorScale(1, 1, 1, t)
def destroyText(text = perfectText):
text.removeNode()
textTrack = Sequence(Func(perfectText.reparentTo, aspect2d), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=0.3, startScale=0.0), LerpFunctionInterval(fadeFunc, fromData=0.0, toData=1.0, duration=0.5)), Wait(2.0), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=1.0), LerpFunctionInterval(fadeFunc, fromData=1.0, toData=0.0, duration=0.5, blendType='easeIn')), Func(destroyText), WaitInterval(0.5))
soundTrack = SoundInterval(self.sndPerfect)
self.finishIval = Parallel(textTrack, soundTrack)
self.finishIval.start()
def finishConclusion(self):
DistributedPartyCatchActivity.notify.debug('finishConclusion')
if base.localAvatar.doId in self.toonIds:
self.takeLocalAvatarOutOfActivity()
base.cr.playGame.getPlace().fsm.request('walk')
def showJellybeanReward(self, earnedAmount, jarAmount, message):
if earnedAmount > 0:
DistributedPartyActivity.showJellybeanReward(self, earnedAmount, jarAmount, message)
else:
base.cr.playGame.getPlace().fsm.request('walk')
| [
"[email protected]"
] | |
afd81f81f1f1b883587446ae90c0eef7fe9119b6 | 7d02813987b49c2a69d92b9b2fdf5148af37274f | /case/Recommend/testAccountBind.py | bfa36a597587e27610642247da283901f0f4eb06 | [] | no_license | xgh321324/api_test | 29e01cbe5f0b7c2df25fb7e781cedf8031140c72 | 2575495baac3ab90adab7a7a85904c38a78dd4b7 | refs/heads/master | 2022-07-23T19:54:39.320828 | 2022-07-02T09:13:35 | 2022-07-02T09:13:35 | 129,185,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,803 | py | #coding:utf-8
from common.login_lanting import auto_login_by_UID
import requests,unittest,time,json
from common.logger import Log
from common.Hash import get_digit,get_sign
from common.Excel import Excel_util
class Account(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.s = requests.session()
cls.to = auto_login_by_UID()
cls.header = {'User-Agent': 'PelvicFloorPersonal/4.1.1 (iPad; iOS 10.1.1; Scale/2.00)',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-Hans-CN;q=1',
'Content-Type': 'application/json',
'requestApp': '2',
'requestclient': '2',
'versionForApp': '4.4.0',
'Authorization': 'Basic YXBpTGFudGluZ0BtZWRsYW5kZXIuY29tOkFwaVRobWxkTWxkQDIwMTM=',
'Connection': 'keep-alive'
}
cls.log = Log()
cls.excel = Excel_util(r'C:\Users\Administrator\Desktop\Interface_testcase.xls')
def test_bind_account01(self):
        u'Bind withdrawal account endpoint'
        self.log.info('Starting test: bind account endpoint..')
url = 'http://api-rec.sunnycare.cc/v1/account/bind'
json_data = {
'token': self.to,
'timestamp': str(int(time.time())),
'alipay_account': '2088012687108144',
'real_name': '许广会',
'nick_name': '许广会',
'nonce': get_digit()
}
json_data['sign'] = get_sign(json_data)
r = self.s.post(url,headers = self.header,json=json_data)
        self.log.info('Bind Alipay response: %s' % r.json())
        # assertions
        self.assertEqual(200, r.json()['code'], msg='status code in response is not 200')
        self.assertEqual('请求成功', r.json()['note'])  # note text means "request succeeded"
        self.log.info('Finished testing bind account endpoint!\n')
def test_bind_account02(self):
        u'Unbind account endpoint'
        self.log.info('Starting test: unbind account endpoint..')
url = 'http://api-rec.sunnycare.cc/v1/account/unbind'
json_data = {
'token': self.to,
'timestamp': str(int(time.time())),
            'type': '0',  # 0: Alipay; 1: WeChat
'nonce': get_digit()
}
json_data['sign'] = get_sign(json_data)
r = self.s.post(url,headers = self.header,json=json_data)
        self.log.info('Unbind Alipay response: %s' % r.json())
        # assertions
        self.assertEqual(200, r.json()['code'], msg='status code in response is not 200')
        self.assertEqual('请求成功', r.json()['note'])  # note text means "request succeeded"
        self.log.info('Finished testing unbind account endpoint!\n')
@classmethod
def tearDownClass(cls):
cls.s.close()
if __name__=='__main__':
unittest.main()
| [
"[email protected]"
] | |
56cfe94c34974098be5441d30e82c556d53a814e | 86a017dd4c8d4d77c511cc598190aaa9dc0ae3e8 | /data structure/mine_linked_list.py | 92ff136a59524e8fa5ebb2031ddd83e8e998da40 | [] | no_license | sungguenja/studying | fd7459eb9faa6488d7b63bf3884a92513daf3c54 | 719f4dfbda211c34de2a0c8cf3b9d3001f29fcec | refs/heads/master | 2023-08-17T13:46:44.343780 | 2023-08-10T11:55:15 | 2023-08-10T11:55:15 | 232,306,053 | 0 | 0 | null | 2022-12-16T10:53:26 | 2020-01-07T11:00:28 | Python | UTF-8 | Python | false | false | 1,711 | py | import mine_node
class LinkedList:
def __init__(self):
self.head = None
    def isEmpty(self):
        return self.head == None
    def clear(self):
        self.head = None
    def push(self, item):
        # insert a new node at the front of the list
        now_node = mine_node.Node(item, self.head)
        self.head = now_node
    def size(self):
        node = self.head
        count = 0
        while node != None:
            count += 1
            node = node.link
        return count
def getNode(self,position):
if position<0:
return None
node = self.head
while position>0 and node != None:
node = node.link
position -= 1
return node
def getValue(self,position):
node = self.getNode(position)
if node == None:
return None
else:
return node.data
def replace(self,item,position):
node = self.getNode(position)
if node != None:
node.data = item
def find(self,data):
node = self.head
while node != None:
if node.data == data:
break
node = node.link
return node
def insert(self,position,data):
node = self.getNode(position-1)
if node == None:
self.head = mine_node.Node(data,self.head)
else:
insert_node = mine_node.Node(data,node.link)
node.link = insert_node
def delete(self,position):
node = self.getNode(position-1)
        if node == None:
            # deleting position 0: advance the head pointer
            if self.head != None:
                self.head = self.head.link
        elif node.link != None:
            node.link = node.link.link
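# minimal usage sketch (assumes mine_node.Node(data, link) as constructed above)
if __name__ == '__main__':
    lst = LinkedList()
    lst.insert(0, 'a')      # a
    lst.insert(1, 'b')      # a -> b
    lst.insert(1, 'c')      # a -> c -> b
    print(lst.getValue(1))  # prints: c
    lst.delete(1)           # a -> b
    print(lst.size())       # prints: 2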
"[email protected]"
] | |
4724d5aa9415a81ce783f5bab5bea5842e84c4e9 | 217440fcc3a91e4ad1a8e008bd315128de7d571a | /day11/08-常见类的函数.py | 9a1178d3e15539060839c925447403eea8ccf73c | [] | no_license | zhangbo111/0102-0917 | a6af056ce9c9a8ab9500e8d016846dc6c50ec1c6 | 1631ea402612e82ae62b093749e2c4f19a021c63 | refs/heads/master | 2020-04-18T16:55:41.675156 | 2019-02-12T01:48:25 | 2019-02-12T01:48:25 | 167,643,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | class Father:
pass
class Mother:
pass
class Laowang:
pass
class Son(Father, Mother):
pass
# 检测一个类是否是另外一个类的子类 如果是返回True 否 False
result1 = issubclass(Son, Father)
result2 = issubclass(Son, Mother)
result3 = issubclass(Son, Laowang)
# 检测Son类是否是Mother类或者Laowang类的子类 满足一个就可以
result4 = issubclass(Son, (Mother, Laowang))
print(result1, result2, result3, result4)
| [
"[email protected]"
] | |
5f7a6e0094d7dff4e2a88f1833c2b9afbec85264 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/rtdmc/ajpfilterpol.py | 2d8941c96e207a74adc8b90ad0b1cdbcb211fabc | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 4,481 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AJPFilterPol(Mo):
meta = ClassMeta("cobra.model.rtdmc.AJPFilterPol")
meta.isAbstract = True
    meta.moClassName = "rtdmcAJPFilterPol"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstract JP Filter Policy"
meta.writeAccessMask = 0x20000001
meta.readAccessMask = 0x20000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.rtdmc.RsFilterToRtMapPol")
meta.childClasses.add("cobra.model.pim.RouteMapDef")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.rtdmc.RsFilterToRtMapPol", "rsfilterToRtMapPol"))
meta.childNamesAndRnPrefix.append(("cobra.model.pim.RouteMapDef", "rtmapdef"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.pol.Comp")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.rtdmc.AFilterPol")
meta.concreteSubClasses.add("cobra.model.pim.JPOutbFilterDef")
meta.concreteSubClasses.add("cobra.model.pim.JPInbFilterPol")
meta.concreteSubClasses.add("cobra.model.pim.JPInbFilterDef")
meta.concreteSubClasses.add("cobra.model.pim.JPOutbFilterPol")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
e2a0d3c0ad23256cde4b53012bff5b0474c41b96 | 56014da6ebc817dcb3b7a136df8b11cf9f976d93 | /Python基础笔记/05-读写文件及办公文档自动化/05.04-OS模块.py | 09d813dea42521efecdd3acd34a51b2e3b7f6223 | [] | no_license | sunday2146/notes-python | 52b2441c981c1106e70a94b999e986999334239a | e19d2aee1aa9433598ac3c0a2a73b0c1e8fa6dc2 | refs/heads/master | 2022-01-12T22:55:45.401326 | 2019-01-18T03:18:26 | 2019-01-18T03:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | import os
"""
os:包含了普遍的操作系统的功能
"""
#nt---windows系统 posix---linux,Unix或Mac OS X
#获取操作系统 类型
print(os.name)
#print(os.unname)--打印操作系统详细的信息,windows不支持
#获取操作系统中的环境变量
print(os.environ)
#获取指定环境变量
print(os.environ.get("appdata"))
#获得当前目录 ./a/
print(os.curdir)
#获取当前工作目录,即当前python脚本所在的目录
print(os.getcwd())
#以列表的形式返回指定目录下所有文件
print(os.listdir(r"C:\Users\Zhangyadi\Desktop"))
#在当前目录下创建新目录
#os.mkdir("sunck")
#删除目录
#os.rmdir("sunck")
#获取文件属性
#print(os.stat("sunck"))
#重命名
#os.rename("sunck","kaige")
#删除普通文件
#os.remove("hello.py.txt")
#运行shell命令---记事本
#os.system("notepad")
#os.system("write")-写字板
#os.system("mspaint")--画板
#os.system("shutdown-s-t 500")-自动关机
#os.system("shutdown-a")-取消
#os.system("taskkill/f /im notepad.exe")--关闭
#有些方法存在os模块里,还有写存在与os.path
#查看当前的绝对路径
print(os.path.abspath("kaige"))
#拼接路径
p1 = r"C:\Users\Zhangyadi\Desktop\project"
p2 = "sunck"
#注意:参数2里开始不要有斜杠\
#C:\Users\Zhangyadi\Desktop\project\sunck
print(os.path.join(p1,p2))
p3 = "/root/sunck/home"
p4 = "kaige"
print(os.path.join(p3,p4))
#拆分路径
path2 = r"C:\Users\Zhangyadi\Desktop\project\kaige"
print(os.path.split(path2))
#获取扩展名
print(os.path.splitext(path2))
#判断是否是目录
print(os.path.isdir(path2))
#判断文件是否存在
path3 = r"C:\Users\Zhangyadi\Desktop\56fil6.txt"
print(os.path.isfile(path3))
#判断目录是否存在
print(os.path.exists(path2))
#获得文件大小(字节)
print(os.path.getsize(path3))
#获得文件的目录
print(os.path.dirname(path3))
print(os.path.basename(path3))#获取文件名
| [
"[email protected]"
] | |
2e9229b24034579de5a50820f63ba0a844cc8d76 | 765b765c7b25cd9e6fa7513794a639ecbd210c95 | /powerapi/test_utils/__init__.py | 582930c5a980fc9c881f27d276ff8838ca81f53b | [
"BSD-3-Clause",
"Python-2.0",
"Apache-2.0"
] | permissive | PierreRustOrange/powerapi | 80df09dc72bf248a999216a9f5e0a167b8ea4e5e | 400ee58d3dc9f3bb6706b12571cb10afd8a91787 | refs/heads/master | 2022-02-13T06:15:22.576416 | 2021-10-28T12:01:17 | 2021-10-28T12:01:17 | 244,576,648 | 0 | 0 | BSD-3-Clause | 2020-03-03T08:05:07 | 2020-03-03T08:05:06 | null | UTF-8 | Python | false | false | 1,569 | py | # Copyright (c) 2021, INRIA
# Copyright (c) 2021, University of Lille
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| [
"[email protected]"
] | |
a71eaf902c6b63983c91e8caf7675b99dd64e78b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/303/80291/submittedfiles/testes.py | f8fdf8b73bca451bb6d4e647f3f675e329669678 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | # -*- coding: utf-8 -*-
n = int(input('Enter a number: '))
if n % 3 == 0 and n % 7 == 0:
    print('The number is divisible by 3 and by 7')
else:
    print('The number is not divisible by 3 and by 7')
| [
"[email protected]"
] | |
9885653186d1619aaa626651335b51322f938b13 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02754/s775628525.py | c08aa17fa27593932f995ed4aa58535828193f96 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | if __name__ == '__main__':
n,a,b = map(int,input().split())
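    # items repeat in blocks of (a + b); only the first a of each block count,
    # so the answer is a per full block plus min(a, remainder) from the last one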
tmp1 = (n // (a + b)) * a
if tmp1 == 0:
tmp2 = min(n,a)
else:
tmp2 = n % (a + b)
tmp2 = min(a,tmp2)
print(tmp1+tmp2) | [
"[email protected]"
] | |
94607147cb6a428256583d99cca352c265328f80 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02595/s086135640.py | 10579db5c1e5ef2a5300d80b183a84bc3668641d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | n,d = map(int,input().split())
cnt = 0
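# compare squared distances against d**2 so no square root is needed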
md = d**2
for _ in range(n):
a,b = map(int,input().split())
if md >= (a**2+b**2):
cnt += 1
print(cnt)
| [
"[email protected]"
] | |
183f7b8c55dcea8984a0f890ca6d83b8360ce420 | 993ef8924418866f932396a58e3ad0c2a940ddd3 | /Production/python/Summer20UL17/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8_cff.py | 19be89904fdebd86931dd97d867f4863815778f5 | [] | no_license | TreeMaker/TreeMaker | 48d81f6c95a17828dbb599d29c15137cd6ef009a | 15dd7fe9e9e6f97d9e52614c900c27d200a6c45f | refs/heads/Run2_UL | 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 | Python | UTF-8 | Python | false | false | 27,922 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/0130ADED-8944-C644-A802-94AFC869D180.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/02A675E2-0B98-1E47-BEF2-87AE267B3872.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/05947A84-468D-0A4B-824F-F909985C25A2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/0EB832E6-CFDA-1F4F-831E-80EF025519F4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/0F0728C6-805B-FE44-B8FA-8B6187AFEF8E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/0F3CD983-9A34-484F-933F-3C21DC6C9E9D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1021FA02-DD5C-FE4A-9829-79C55F702D5B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1050621F-A8FA-9E4E-8EB3-5F00EE52E5E1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1145C4B6-FBEE-AE44-A882-B83E9F95EECF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/131DFA05-56CF-2941-8003-661F79235C14.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1334FCA4-B8BC-8947-AA50-C254B3A4CE5C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/13CC0632-BB4F-8243-9A01-49F8EF020833.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/16E97FF6-09B6-AD4E-97BE-5B0FA146C3C0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/172A350A-7746-374B-95F7-6504F93336E6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1D6AC295-4F43-F14C-B55D-86F6FC83B962.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1D891B47-4DA9-1C46-B491-82044BD05D26.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1F10683A-0032-1044-8E8C-3892254F215A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/20C72032-4DC2-0B4E-AFFA-8F12A44E3060.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/290937DC-867D-3143-A8A4-917F6D31E53B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/2EB5DE9B-B61C-8F4B-B295-B0F235019434.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/31C75064-B139-4E4F-B592-732566F66DAA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/33FA51CA-1F64-104D-B5DD-AEBF47F612F3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/34E98EFE-AA61-8142-9152-247888299ED0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/35DA2631-3BB1-FC4E-8A2A-AE7B2E4E5476.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/37425CC5-CBE9-464E-96E9-5C06D4F7CA44.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/38BAC4EF-B549-124E-ADE1-BBF142C9C1B2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/3DAF4FC3-9097-0542-9A41-19B6C85EC65C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/3E833A9C-A5D7-044C-82CE-7835A22FAD5F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/3F6BF9EE-2AB9-B748-B54B-DD1A016D89DD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/45F68B41-93BC-7040-9CB0-D39E52AC2144.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/4A717FB3-FB8C-7349-A536-8B2D43A5CEC9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/4B14CF1A-A022-3643-9FDB-20EE9FC6EED9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/4B6744E7-2A4F-624E-B577-9B59FA360FA9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/4B692BBD-CAB4-314F-93DF-F3B656F8129A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/4BA885FD-17E1-D54A-9D02-655EAD670F28.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/54739C76-024C-8F4C-9F2F-4CD073BE5896.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/56AA3A5D-6B5C-D54C-8B70-24A99BD4C0D8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/57918A90-9481-8D47-B47D-DE329B3D5889.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/57D0B089-45FA-5840-9826-5C500B55C896.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/57DE6F33-6427-0F4A-BB04-41ACEBB2170B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/5A73A109-2336-A54C-8C92-AC4FE5C46949.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/5B887B67-FEE6-3347-A798-779B15B1B03E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/5BDC6A67-F02D-AA4B-827A-2080F4804209.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/64131FFA-2DC6-C247-B8BA-5EB3285B660A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/662A4882-EDD2-0A49-96F5-DF010A222784.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/66402955-6310-7944-A363-2ACFD06BB460.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/66FDCBF5-79B2-284F-BB67-AD47A7FDC86B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/68B17790-E335-C746-9E29-C8A3497FBC02.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/68DD74DB-F918-624A-99FA-9C651991F84B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/6A5535C4-5F5B-164B-B616-48487984C880.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/6A7E4CEB-260F-8B4B-9393-8B9467E7E60D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/6BAE3043-3D74-E641-B6D7-3A6248297045.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/6E59C4F4-4388-EA43-8337-DD7A7F421AAD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/749AB99A-5BBF-1F45-8D69-4524E98187C6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/74F9CCA6-49D7-464A-A276-323339BCCD0E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/75FB00E3-AC8A-4B4D-B234-07AE0193BB6E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/78E2CB0E-556C-3B48-B210-CEDBA0647D61.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/79646C05-8327-3948-8387-B747CBCF674D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7AE660E0-C428-AA46-B65B-23CFB6F6F172.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7BC9D945-CC67-1747-8684-87430F96675C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7CBF2F34-2FB5-6042-8F9C-AF9B739F5F68.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7E39A5FB-303A-CE44-878D-5FD31CCAF56E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7E3E7A04-2518-8049-8306-2E47440BE03B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7EE3170D-7EB6-D24A-A66F-8D17AEA997BB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7F47D1B5-0EB4-0B44-8C03-C3CCF057CB0E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7F774269-4E11-454C-B65C-5E19F27CEEF6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/83BD6D39-20DC-AE4A-93CE-A0604AB1BE55.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/8630EF2E-9EC6-9946-982E-A4C157B5B7A9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/86D0FB68-F7A2-2649-9E6B-2D49A482BA95.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/8C2DA017-400F-224D-95DE-9824DCECE98E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/8F4777F6-DEF7-A545-A32C-6315B6772486.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9255A4BB-EF3C-ED4D-811F-0768C19CB5A9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9339D244-3F3B-484F-983B-6FF921C669AC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/944EFD7D-54B4-D64B-B44D-E6C1B3B936A4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9483732B-D0D2-6E4B-9E9E-E0B8ABE1F031.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9628BD9C-6E4E-E548-B4F0-3B4ABC4BB261.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/96CF3650-77A0-3649-9F80-B3F09B0A9F8A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/98D8E852-ED3C-E54A-80BA-DE8B98C37398.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9ACAC3C2-0B25-5C4E-9220-71530D28E4C9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9B8AEEF5-7D1C-E243-92C8-1EC1CFC27BBE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9DCF6A19-0565-024C-8DFD-F7619413DEA7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9E70111A-8B9B-504D-B28A-93C6E009FA06.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9EBF7D46-AFB9-1F4B-83F0-639F135B7918.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/A1ECB0DB-D3FF-A741-9AFC-D0DF8BA9DA6C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/A367897E-1FAE-E847-B9BC-6EE067CB7ACA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/A3CD50BE-2200-7146-9EF8-2E44BC3FB1CC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/AB255731-385F-BB40-89C3-405BE139B458.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/AD3C4FE4-B1BF-EC44-976F-09DF59154E3E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/AF7A6431-2EC1-AF41-9103-F846648CB826.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B545C91E-270A-5C41-937C-C673BF91D2E2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B60045B1-6FB1-384C-AE91-E939DF2591E9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B7B5DC5A-9FFF-2348-9EBB-95F36B8D5673.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B86E9CE7-6F5A-9049-8522-236ED9AA7181.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B92BBBB5-2EB9-544B-A685-9C5FB4A148CA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B98FDA60-5B9D-A043-A84B-80E58A9C6AC8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/BC8E5998-3F71-5842-8630-F18439372DA1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/BCCC85C1-8063-5A47-97AC-FDAC23A9E43D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C14BBECC-3244-3E4E-A079-497DBA522F24.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C2C0EEA6-27F7-CE41-B71D-37C1C8BF3B47.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C58D362D-147D-F74E-88AA-891258127F5E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C712065D-B57D-CB40-AB8F-34A3EBEC3C67.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C8C07EEB-B704-AC48-BE36-0624A80D8376.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C98D99FC-02C6-4147-8F5E-B46EC5B0595D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/CAC8883D-8927-6549-B09D-AFE5A9AF7C6B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/CBB7E645-8D71-5342-BCE9-D6DB4EDD4011.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/CCACB17A-7EC8-2947-BF9B-B7EB9C100DE7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/CCC3DFE9-387E-B24E-B6C5-8FCC0E6509C4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/CE57397B-195D-0344-A02D-5A20DA469F89.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D20B7A34-61CC-1244-A837-B91344F18709.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D2536683-026F-FB4C-93DB-D506582625A7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D2B8F723-A16F-A14A-A631-9435E970B01E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D3F4F9F3-8153-4545-8F27-389AAF65512F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D486B0A5-374D-5C46-836C-090ACF060682.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D4971DD8-0764-394F-AA84-93825254B832.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D4AF7D3E-44BA-FB42-961D-8A7492E3140E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D4CBF585-C586-A14E-AEF2-786097584A7A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D524DE0C-884B-4147-B79F-195AFD52EBF8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D7D6C945-6F4B-6547-A9DA-9593B1C65863.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D96BBAFF-ACAF-7B4B-A277-0C6B0DFE03EE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D99AD1D0-4E23-5B41-8B39-1DA3AB4DF5AA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/DAB41616-4FC2-C64D-BBB6-EA332F662005.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/DAD25896-8067-FC47-87A4-D4A41C75143E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/DC95735D-A3C6-F94E-AFE0-B0C05F5C65D4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E10EF7C2-4F65-E74E-BE5C-ECCC58300813.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E23D0003-7D66-904B-8D7B-659913353094.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E2932D79-4F07-F74C-8951-E76E964FF337.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E3366E88-506E-864D-8EB0-89597FBD8C7C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E6AF960D-B1BD-4243-9032-FF89345A8E2B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E6DE11A5-9C8D-AF44-9D03-10275BF74E1C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E736C29B-E99F-9C41-B331-9C49BAD77CA0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E77EFD29-F90B-8444-A5FE-875B733F4F7A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E8DACCEA-3E32-7848-B47F-D7A5AB2FD70B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E94E464D-7863-F343-89AC-979DAD8737A6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/EB852FB0-60E0-3A42-872A-E79AB497E05B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/EDE432D0-A649-C943-8C8E-B7A4F4F866EE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/EEB244CE-3042-804A-A2DA-233EFEAF91A5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/F028EE1D-9CA9-D34B-8249-2E2F71CCD97D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/F4876263-09B5-344F-A218-ACC6284C325F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/F559302D-0CE2-3E49-AE93-D8AE26CBD03C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/F7780C0A-61CB-9147-B26B-16192EA5A9F9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/FBEA71D1-1339-0445-A7F8-CE8580DC595E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/FE742668-1A9C-8D43-AB59-22B7885B15E9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/FE999DD6-166D-8044-A406-27FC27DB091D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/272686A0-A7C8-754A-A8D3-5370DE966BC8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/2B9C8E72-3983-2448-9165-DDCB1E9AF1C2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/47C3EE8E-EC34-4942-B581-BD3A3A5C39BF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/60064A3B-D343-C44A-9106-F4EB0A38FBA1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/616D7EAA-1890-A84D-A3D4-CFA0ADD930FF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/D2D0F93D-E08C-8B41-B5DD-48A0E56E9742.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/70000/1245718C-A726-D543-B2C5-304FAB911516.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/70000/48D8FBBB-C840-F744-BEE3-28119A29E0F4.root',
] )
| [
"[email protected]"
] | |
b37b2e218d9b6497281ffcb42383e42614c8930c | f0a5ad7b8aa39f51f233391fead0da3eabecc4ee | /.history/toolbox/abreFile_20191127163354.py | 2da87cf946b4539934df6748b231c06528e4165f | [] | no_license | OseiasBeu/webScrapping | e0a524847e55b24dbbd3d57bbe7fa43b4e101f48 | 1e72c7551aea355a891043baecfcbab8a89e719a | refs/heads/master | 2022-10-25T18:12:50.858653 | 2020-06-18T01:29:24 | 2020-06-18T01:29:24 | 224,681,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | import pandas as pd
import os
def abreFile():
oldAddres = 'C:/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex/files/'
newFile = 'C:/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex/files/extract.xlsx'
def encontraArquivosEmPastaRecursivamente(pasta, extensao):
arquivosTxt = []
caminhoAbsoluto = os.path.abspath(pasta)
for pastaAtual, subPastas, arquivos in os.walk(caminhoAbsoluto):
            arquivosTxt.extend([os.path.join(pastaAtual, arquivo) for arquivo in arquivos if arquivo.endswith(extensao)])
return arquivosTxt
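    # The helper name means "find files in a folder recursively": it walks
    # `pasta` and collects every file whose name ends with `extensao`
    # (previously the extension was hard-coded and the parameter ignored).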
    old = encontraArquivosEmPastaRecursivamente(oldAddres, '.xls')
    if not old:
        return  # no .xls export found, nothing to rename
    print(old[0])
    os.rename(old[0], newFile)
# wb = pd.ExcelFile('./file/extract.xlsx')
# df = pd.read_excel(wb)
# print(df.head())
abreFile() | [
"[email protected]"
] | |
3855a95c8084c4bb4923ae0522d68b3251d55a9c | bfb1db9b58064f63ed8040b50d5fe3b4664adc01 | /wechat_django/decorators.py | 689644e5a65f53d8e12040fa0cf847b3d445f9e8 | [
"MIT"
] | permissive | hvv1616/wechat-django | 74947d7ea126e507d649cb152af1a66d68593a8f | 5599f237bc1781a594102ce7ff491086f8cf69d2 | refs/heads/master | 2020-04-30T07:22:38.427671 | 2019-03-18T12:56:20 | 2019-03-18T12:56:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from functools import wraps
from six import text_type
__all__ = ("message_handler", )
def message_handler(names_or_func=None):
"""自定义回复业务需加装该装饰器
被装饰的自定义业务接收一个``wechat_django.models.WeChatMessageInfo``对象
并且返回一个``wechatpy.replies.BaseReply``对象
:param names_or_func: 允许使用该message_handler的appname 不填所有均允许
:type names_or_func: str or list or tuple or callable
@message_handler
def custom_business(message):
user = message.user
# ...
return TextReply("hello", message=message.message)
@message_handler(("app_a", "app_b"))
def app_ab_only_business(message):
# ...
"""
def decorator(view_func):
@wraps(view_func)
def decorated_view(message):
return view_func(message)
decorated_view.message_handler = names or True
return decorated_view
if isinstance(names_or_func, text_type):
names = [names_or_func]
    elif callable(names_or_func):
        names = None
        return decorator(names_or_func)
    else:
        # A bare list/tuple of appnames, or None (allow every app); without
        # this branch `names` would be unbound for those documented cases.
        names = names_or_func
    return decorator
| [
"[email protected]"
] | |
db8cdad93128a19ba84640c54d3a3bcf21458506 | dc798f062b15e6ad060a5cfb731db5f286e2088b | /lesson7/task4/tests.py | 0dc1cb67e0b5856510c204ea5431b442ee148c04 | [] | no_license | DmitryTsybulkin/stepik-python | dce78c4fe616fe3f5bd26e0dad9c80bc5c5c4ab2 | 0726346f43e21623a1200aa76b9c7e9ff5476844 | refs/heads/master | 2020-04-27T22:28:33.695541 | 2019-09-13T14:03:28 | 2019-09-13T14:03:28 | 174,738,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from test_helper import check_samples
if __name__ == '__main__':
check_samples(samples=[["480\n1\n2","9\n2"],["475\n1\n55","9\n50"]]) | [
"[email protected]"
] | |
68cdea4e70011e9f6aed99dc512556fe7e0e6826 | b81668a2cc43654cf6a3ed952d781310876838f9 | /venv/Lib/site-packages/thinc/backends/ops.py | 838ae8c0ffa67b6b448fc765e4e95f30422fb0bd | [] | no_license | gowthamr1999/docbot-1 | 6a8b873407f15035fb8b30b69ed66ded343bd1e4 | 3119958d68e95673b4c9187d58d8cad5c18a6b2c | refs/heads/master | 2023-04-07T02:16:55.574750 | 2021-04-16T02:52:38 | 2021-04-16T02:52:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,936 | py | from typing import Optional, List, Tuple, Sequence, Union, cast, TypeVar
from typing import Iterator, overload
import numpy
import itertools
from ..types import Xp, Shape, DTypes, DTypesInt, DTypesFloat, List2d, ArrayXd
from ..types import Array2d, Array3d, Floats1d, Floats2d, Floats3d, Floats4d
from ..types import FloatsXd, Ints1d, Ints2d, Ints3d, Ints4d, IntsXd, _Floats
from ..types import DeviceTypes, Generator, Padded, Batchable, SizedGenerator
from ..util import get_array_module, is_xp_array, to_numpy
ArrayT = TypeVar("ArrayT", bound=ArrayXd)
FloatsT = TypeVar("FloatsT", bound=_Floats)
class Ops:
name: str = "base"
xp: Xp = numpy
def __init__(
self, device_type: DeviceTypes = "cpu", device_id: int = -1, **kwargs
) -> None:
self.device_type = device_type
self.device_id = device_id
def to_numpy(self, data): # pragma: no cover
if isinstance(data, numpy.ndarray):
return data
else:
raise ValueError("Cannot convert non-numpy from base Ops class")
def minibatch(
self,
size: Union[int, Generator],
sequence: Batchable,
*,
shuffle: bool = False,
buffer: int = 1,
) -> SizedGenerator:
"""Iterate slices from a sequence, optionally shuffled. Slices
may be either views or copies of the underlying data.
The `size` argument may be either an integer, or a sequence of integers.
If a sequence, a new size is drawn before every output.
If shuffle is True, shuffled batches are produced by first generating
an index array, shuffling it, and then using it to slice into the
sequence.
An internal queue of `buffer` items is accumulated before being each
output. Buffering is useful for some devices, to allow the
network to run asynchronously without blocking on every batch.
"""
if not hasattr(sequence, "__len__"):
err = f"Can't minibatch data. Expected sequence, got {type(sequence)}"
raise ValueError(err)
sizes = self._get_batch_sizes(
len(sequence), itertools.repeat(size) if isinstance(size, int) else size
)
indices = numpy.arange(len(sequence))
# This is a bit convoluted, but it's a time where convenience makes
# trickery worthwhile: instead of being an actual generator, we
# return our SizedGenerator object, which provides a __len__.
def _iter_items():
if shuffle:
numpy.random.shuffle(indices)
queue = []
i = 0
for size in sizes:
queue.append(self._get_batch(sequence, indices[i : i + size]))
if len(queue) >= buffer:
yield from queue
queue = []
i += size
yield from queue
return SizedGenerator(_iter_items, len(sizes))
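    # A minimal usage sketch (`ops` and `train_data` are illustrative names,
    # not part of this module):
    #
    #   ops = Ops()
    #   for batch in ops.minibatch(128, train_data, shuffle=True):
    #       ...  # each `batch` holds up to 128 items drawn from train_data
    #
    # Passing a generator as `size` (e.g. a schedule that grows over time)
    # draws a fresh batch size before each output.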
def multibatch(
self,
size: Union[int, Generator],
sequence: Batchable,
*others: Batchable,
shuffle: bool = False,
buffer: int = 1,
) -> SizedGenerator:
"""Minibatch one or more sequences of data, and yield
lists with one batch per sequence. See ops.minibatch.
"""
# You'd think we could just do this by calling into minibatch and zip...
# But the shuffling makes it really hard.
sequences = (sequence,) + tuple(others)
if not all(hasattr(seq, "__len__") for seq in sequences):
values = ", ".join([f"{type(seq)}" for seq in sequences])
err = f"Can't multibatch data. Expected sequences, got {values}"
raise ValueError(err)
sizes = self._get_batch_sizes(
len(sequence), itertools.repeat(size) if isinstance(size, int) else size
)
indices = numpy.arange(len(sequence))
def _iter_items():
if shuffle:
numpy.random.shuffle(indices)
queue = []
i = 0
for size in sizes:
idx_batch = indices[i : i + size]
queue.append([])
for sequence in sequences:
queue[-1].append(self._get_batch(sequence, idx_batch))
if len(queue) >= buffer:
yield from queue
queue = []
i += size
yield from queue
return SizedGenerator(_iter_items, len(sizes))
def _get_batch(self, sequence, indices):
if isinstance(sequence, list):
subseq = [sequence[i] for i in indices]
elif isinstance(sequence, tuple):
subseq = tuple(sequence[i] for i in indices) # type: ignore
else:
subseq = sequence[indices] # type: ignore
if is_xp_array(subseq):
subseq = self.as_contig(
cast(ArrayXd, self.xp.asarray(subseq))
) # type: ignore
return subseq
def _get_batch_sizes(self, length: int, sizes: Iterator[int]):
output = []
i = 0
while i < length:
output.append(next(sizes))
i += output[-1]
return output
def seq2col(self, seq: Floats2d, nW: int) -> Floats2d:
"""Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1))
sequence. The new sequence is constructed by concatenating nW preceding
and succeeding vectors onto each column in the sequence, to extract a
window of features.
"""
# This is a test implementation that only supports nW=1
assert nW == 1
B = seq.shape[0]
I = seq.shape[1]
cols = self.alloc3f(B, (nW * 2 + 1), I)
# Copy left contexts. The last words aren't the left-context for anything.
cols[nW:, :nW] = self.reshape3f(seq[:-nW], -1, nW, I)
cols[:, nW] = seq
cols[:-nW, nW + 1 :] = self.reshape3f(seq[nW:], -1, nW, I)
return self.reshape2f(cols, B, I * (2 * nW + 1))
def backprop_seq2col(self, dY: Floats2d, nW: int) -> Floats2d:
"""The reverse/backward operation of the `seq2col` function: calculate
the gradient of the original `(M, N)` sequence, as a function of the
gradient of the output `(M, N*(nW*2+1))` sequence.
"""
# This is a test implementation that only supports nW=1
assert nW == 1
nF = nW * 2 + 1
B = dY.shape[0]
I = dY.shape[1] // nF
# Having trouble getting the kernel to work...
dX = self.alloc2f(B, I)
dY3d = self.reshape3f(dY, B, nF, I)
dX[:-nW] += self.reshape2f(dY3d[nW:, :nW], -1, I)
dX += dY3d[:, nW]
dX[nW:] += self.reshape2f(dY3d[:-nW, nW + 1 :], -1, I)
return dX
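    # Concretely, for nW=1 a sequence with rows [a, b, c] becomes
    # [0|a|b, a|b|c, b|c|0]: each output row concatenates the previous row,
    # the row itself and the next row, zero-padded at the edges. The backward
    # pass scatters the three column blocks back onto the rows they came from.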
def gemm(
self,
x: Floats2d,
y: Floats2d,
out: Optional[Floats2d] = None,
trans1: bool = False,
trans2: bool = False,
) -> Floats2d:
"""Perform General Matrix Multiplication (GeMM) and optionally store
the result in the specified output variable.
"""
if trans1:
x = x.T
if trans2:
y = y.T
if out is None:
return self.xp.dot(x, y)
else:
self.xp.dot(x, y, out=out)
return out
def affine(self, X: Floats2d, W: Floats2d, b: Floats1d) -> Floats2d:
"""Apply a weights layer and a bias to some inputs, i.e.
Y = X @ W.T + b
"""
Y = self.gemm(X, W, trans2=True)
Y += b
return Y
def flatten(
self,
X: Sequence[ArrayT],
dtype: Optional[DTypes] = None,
pad: int = 0,
ndim_if_empty: int = 2,
) -> ArrayT:
"""Flatten a list of arrays into one large array."""
if X is None or len(X) == 0:
return self.alloc((0,) * ndim_if_empty, dtype=dtype or "f")
xp = get_array_module(X[0])
X = [x for x in X if x.size != 0]
if int(pad) >= 1:
padded = []
for x in X:
padded.append(xp.zeros((pad,) + x.shape[1:], dtype=x.dtype))
padded.append(x)
padded.append(xp.zeros((pad,) + x.shape[1:], dtype=x.dtype))
X = padded
result = xp.concatenate(X)
if dtype is not None:
result = xp.asarray(result, dtype=dtype)
return result
def unflatten(self, X: Floats2d, lengths: Ints1d, pad: int = 0) -> List[Floats2d]:
"""The reverse/backward operation of the `flatten` function: unflatten
a large array into a list of arrays according to the given lengths.
"""
unflat = []
pad = int(pad)
for length in lengths:
length = int(length)
if pad >= 1 and length != 0:
X = X[pad:]
unflat.append(X[:length])
X = X[length:]
if pad >= 1:
X = X[pad:]
assert len(X) == 0
assert len(unflat) == len(lengths)
return unflat
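    # flatten/unflatten are inverses of each other; e.g. (illustrative only):
    #
    #   flat = ops.flatten([x1, x2], pad=1)
    #   xs = ops.unflatten(flat, ops.asarray1i([len(x1), len(x2)]), pad=1)
    #
    # recovers [x1, x2], with `pad` zero rows inserted around each sequence
    # in the flat array.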
@overload
def pad(self, seqs: List[Ints2d], round_to=1) -> Ints3d:
...
@overload # noqa: F811
def pad(self, seqs: List[Floats2d], round_to=1) -> Floats3d:
...
def pad( # noqa: F811
self, seqs: Union[List[Ints2d], List[Floats2d]], round_to=1
) -> Array3d:
"""Perform padding on a list of arrays so that they each have the same
length, by taking the maximum dimension across each axis. This only
works on non-empty sequences with the same `ndim` and `dtype`.
"""
# TODO: This should be generalized to handle different ranks
if not seqs:
raise ValueError("Cannot pad empty sequence")
if len(set(seq.ndim for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences with different ndims")
if len(set(seq.dtype for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences with different dtypes")
if len(set(seq.shape[1:] for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences that differ on other dimensions")
# Find the maximum dimension along each axis. That's what we'll pad to.
length = max(len(seq) for seq in seqs)
# Round the length to nearest bucket -- helps on GPU, to make similar
# array sizes.
length = (length + (round_to - 1)) // round_to * round_to
final_shape = (len(seqs), length) + seqs[0].shape[1:]
output: Array3d = self.alloc(final_shape, dtype=seqs[0].dtype)
for i, arr in enumerate(seqs):
# It's difficult to convince this that the dtypes will match.
output[i, : arr.shape[0]] = arr # type: ignore
return output
def unpad(self, padded: Array3d, lengths: List[int]) -> List2d:
"""The reverse/backward operation of the `pad` function: transform an
array back into a list of arrays, each with their original length.
"""
output = []
for i, length in enumerate(lengths):
output.append(padded[i, :length])
return cast(List2d, output)
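    # pad/unpad mirror flatten/unflatten but stack along a new batch axis:
    # pad turns a list of (len_i, d) arrays into one (n_seqs, max_len, d)
    # array, and unpad slices each row back down to its recorded length.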
def list2padded(self, seqs: List[Floats2d]) -> Padded:
"""Pack a sequence of 2d arrays into a Padded datatype."""
if not seqs:
return Padded(
self.alloc3f(0, 0, 0), self.alloc1i(0), self.alloc1i(0), self.alloc1i(0)
)
elif len(seqs) == 1:
data = self.reshape3f(seqs[0], seqs[0].shape[0], 1, seqs[0].shape[1])
size_at_t = self.asarray1i([1] * data.shape[0])
lengths = self.asarray1i([data.shape[0]])
indices = self.asarray1i([0])
return Padded(data, size_at_t, lengths, indices)
lengths_indices = [(len(seq), i) for i, seq in enumerate(seqs)]
lengths_indices.sort(reverse=True)
indices_ = [i for length, i in lengths_indices]
lengths_ = [length for length, i in lengths_indices]
nS = max([len(seq) for seq in seqs])
# Reorder the sequences, by length. This looks the same in either
# direction: you're swapping elements between their original and sorted
# position.
seqs = [seqs[x] for x in indices_]
arr: Floats3d = self.pad(seqs)
arr = self.as_contig(arr.transpose((1, 0, 2)))
# Build a lookup table so we can find how big the batch is at point t.
batch_size_at_t_ = self.alloc1i(nS)
batch_size_at_t_ += 1
i = len(lengths_)
for t in range(nS):
if t == lengths_[i - 1]:
i -= 1
if i == 0:
break
batch_size_at_t_[t] = i
return Padded(
cast(Floats3d, arr),
self.asarray1i(batch_size_at_t_),
self.asarray1i(lengths_),
self.asarray1i(indices_),
)
def padded2list(self, padded: Padded) -> List2d:
"""Unpack a Padded datatype to a list of 2-dimensional arrays."""
data = padded.data
indices = to_numpy(padded.indices)
lengths = to_numpy(padded.lengths)
unpadded: List[Optional[Floats2d]] = [None] * len(lengths)
data = self.as_contig(data.transpose((1, 0, 2)))
for i in range(data.shape[0]):
unpadded[indices[i]] = data[i, : int(lengths[i])]
return cast(List2d, unpadded)
def get_dropout_mask(self, shape: Shape, drop: Optional[float]) -> FloatsXd:
"""Create a random mask for applying dropout, with a certain percent of
the mask (defined by `drop`) will contain zeros. The neurons at those
positions will be deactivated during training, resulting in a more
robust network and less overfitting.
"""
if drop is None or drop <= 0:
return self.xp.ones(shape, dtype="f")
elif drop >= 1.0:
return self.alloc(shape)
coinflips = self.xp.random.uniform(0.0, 1.0, shape)
mask = (coinflips >= drop) / (1.0 - drop)
return cast(FloatsXd, self.asarray(mask, dtype="float32"))
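    # This is "inverted dropout": kept positions are scaled by 1 / (1 - drop)
    # at training time, so activations keep the same expected value and no
    # rescaling is needed at inference. With drop=0.5 the mask holds 0.0 or
    # 2.0.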
def alloc1f(self, d0: int, *, dtype: Optional[DTypesFloat] = "float32") -> Floats1d:
return self.alloc((d0,), dtype=dtype)
def alloc2f(
self, d0: int, d1: int, *, dtype: Optional[DTypesFloat] = "float32"
) -> Floats2d:
return self.alloc((d0, d1), dtype=dtype)
def alloc3f(
self, d0: int, d1: int, d2: int, *, dtype: Optional[DTypesFloat] = "float32"
) -> Floats3d:
return self.alloc((d0, d1, d2), dtype=dtype)
def alloc4f(
self,
d0: int,
d1: int,
d2: int,
d3: int,
*,
dtype: Optional[DTypesFloat] = "float32",
) -> Floats4d:
return self.alloc((d0, d1, d2, d3), dtype=dtype)
def alloc_f(
self, shape: Shape, *, dtype: Optional[DTypesFloat] = "float32"
) -> FloatsXd:
return self.alloc(shape, dtype=dtype)
def alloc1i(self, d0: int, *, dtype: Optional[DTypesInt] = "int32") -> Ints1d:
return self.alloc((d0,), dtype=dtype)
def alloc2i(
self, d0: int, d1: int, *, dtype: Optional[DTypesInt] = "int32"
) -> Ints2d:
return self.alloc((d0, d1), dtype=dtype)
def alloc3i(
self, d0: int, d1: int, d2: int, *, dtype: Optional[DTypesInt] = "int32"
) -> Ints3d:
return self.alloc((d0, d1, d2), dtype=dtype)
def alloc4i(
self,
d0: int,
d1: int,
d2: int,
d3: int,
*,
dtype: Optional[DTypesInt] = "int32",
) -> Ints4d:
return self.alloc((d0, d1, d2, d3), dtype=dtype)
def alloc_i(self, shape: Shape, *, dtype: Optional[DTypesInt] = "int32") -> IntsXd:
return self.alloc(shape, dtype=dtype)
def alloc(self, shape: Shape, *, dtype: Optional[DTypes] = "float32") -> ArrayT:
"""Allocate an array of a certain shape."""
if isinstance(shape, int):
shape = (shape,)
return self.xp.zeros(shape, dtype=dtype)
def reshape1f(self, array: FloatsXd, d0: int) -> Floats1d:
return cast(Floats1d, self.reshape(array, (d0,)))
def reshape2f(self, array: FloatsXd, d0: int, d1: int) -> Floats2d:
return cast(Floats2d, self.reshape(array, (d0, d1)))
def reshape3f(self, array: FloatsXd, d0: int, d1: int, d2: int) -> Floats3d:
return cast(Floats3d, self.reshape(array, (d0, d1, d2)))
def reshape4f(
self, array: FloatsXd, d0: int, d1: int, d2: int, d3: int
) -> Floats4d:
return cast(Floats4d, self.reshape(array, (d0, d1, d2, d3)))
def reshape_f(self, array: FloatsXd, shape: Shape) -> FloatsXd:
return self.reshape(array, shape)
def reshape1i(self, array: IntsXd, d0: int) -> Ints1d:
return cast(Ints1d, self.reshape(array, (d0,)))
def reshape2i(self, array: IntsXd, d0: int, d1: int) -> Ints2d:
return cast(Ints2d, self.reshape(array, (d0, d1)))
def reshape3i(self, array: IntsXd, d0: int, d1: int, d2: int) -> Ints3d:
return cast(Ints3d, self.reshape(array, (d0, d1, d2)))
def reshape4i(self, array: IntsXd, d0: int, d1: int, d2: int, d3: int) -> Ints4d:
return cast(Ints4d, self.reshape(array, (d0, d1, d2, d3)))
def reshape_i(self, array: IntsXd, shape: Shape) -> IntsXd:
return self.reshape(array, shape)
def reshape(self, array: ArrayT, shape: Shape) -> ArrayT:
"""Reshape an array."""
if isinstance(shape, int):
shape = (shape,)
return cast(ArrayT, array.reshape(shape))
def asarray4f(
self,
data: Union[Floats4d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats4d:
return cast(Floats4d, self.asarray(data, dtype=dtype))
def asarray3f(
self,
data: Union[Floats3d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats3d:
return cast(Floats3d, self.asarray(data, dtype=dtype))
def asarray2f(
self,
data: Union[Floats2d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats2d:
return cast(Floats2d, self.asarray(data, dtype=dtype))
def asarray1f(
self,
data: Union[Floats1d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats1d:
return cast(Floats1d, self.asarray(data, dtype=dtype))
def asarray_f(
self,
data: Union[FloatsXd, Sequence[float]],
*,
dtype: Optional[DTypes] = "float32",
) -> FloatsXd:
return cast(FloatsXd, self.asarray(data, dtype=dtype))
def asarray1i(
self, data: Union[Ints1d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints1d:
return cast(Ints1d, self.asarray(data, dtype=dtype))
def asarray2i(
self, data: Union[Ints2d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints2d:
return cast(Ints2d, self.asarray(data, dtype=dtype))
def asarray3i(
self, data: Union[Ints3d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints3d:
return cast(Ints3d, self.asarray(data, dtype=dtype))
def asarray4i(
self, data: Union[Ints4d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints4d:
return cast(Ints4d, self.asarray(data, dtype=dtype))
def asarray_i(
self, data: Union[IntsXd, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> IntsXd:
return cast(IntsXd, self.asarray(data, dtype=dtype))
def asarray(
self,
data: Union[ArrayXd, Sequence[ArrayXd], Sequence[float], Sequence[int]],
*,
dtype: Optional[DTypes] = None,
) -> ArrayXd:
"""Ensure a given array is of the correct type."""
if isinstance(data, self.xp.ndarray):
if dtype is not None:
return self.xp.asarray(data, dtype=dtype)
else:
return self.xp.asarray(data)
elif hasattr(data, "numpy"):
# Handles PyTorch Tensor
return data.numpy() # type: ignore
elif dtype is not None:
return self.xp.array(data, dtype=dtype)
else:
return self.xp.array(data)
def as_contig(self, data: ArrayT, dtype: Optional[DTypes] = None) -> ArrayT:
"""Allow the backend to make a contiguous copy of an array.
Implementations of `Ops` do not have to make a copy or make it
contiguous if that would not improve efficiency for the execution engine.
"""
kwargs = {"dtype": dtype} if dtype is not None else {}
return self.xp.ascontiguousarray(data, **kwargs)
def sigmoid(self, X: FloatsT, *, inplace: bool = False) -> FloatsT:
if inplace:
self.xp.exp(-X, out=X)
X += 1.0
X **= -1.0
return X
else:
return 1.0 / (1.0 + self.xp.exp(-X))
def dsigmoid(self, Y: FloatsT, *, inplace: bool = False) -> FloatsT:
if inplace:
Y *= 1 - Y
return Y
else:
return Y * (1.0 - Y)
def dtanh(self, Y: FloatsT, *, inplace: bool = False) -> FloatsT:
if inplace:
Y **= 2
Y *= -1.0
Y += 1.0
return Y
else:
return 1 - Y ** 2
def softmax(self, x: FloatsT, *, inplace: bool = False, axis: int = -1) -> FloatsT:
maxes = self.xp.max(x, axis=axis, keepdims=True)
shifted = x - maxes
new_x = self.xp.exp(shifted)
new_x /= new_x.sum(axis=axis, keepdims=True)
return new_x
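    # Subtracting the row-wise max before exponentiating leaves the result
    # unchanged (softmax is shift-invariant) but prevents exp() from
    # overflowing on large logits.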
def softmax_sequences(
self, Xs: Floats2d, lengths: Ints1d, *, inplace: bool = False, axis: int = -1
) -> Floats2d:
if Xs.ndim >= 3:
err = f"Softmax currently only supports 2d. Got: {Xs.ndim}"
raise NotImplementedError(err)
# This loses almost no fidelity, and helps the numerical stability.
Xs = self.xp.clip(Xs, -20.0, 20.0)
new_x = self.xp.exp(Xs)
summed = self.backprop_reduce_sum(self.reduce_sum(new_x, lengths), lengths)
new_x /= summed
return new_x
def backprop_softmax(self, Y: FloatsT, dY: FloatsT, *, axis: int = -1) -> FloatsT:
dX = Y * dY
dX -= Y * dX.sum(axis=axis, keepdims=True)
return dX
def backprop_softmax_sequences(
self, dY: Floats2d, Y: Floats2d, lengths: Ints1d
) -> Floats2d:
dX = Y * dY
sum_dX = self.backprop_reduce_sum(self.reduce_sum(dX, lengths), lengths)
dX -= Y * sum_dX
return dX
def recurrent_lstm(
self,
W: Floats2d,
b: Floats1d,
h_init: Floats1d,
c_init: Floats1d,
inputs: Floats3d,
is_train: bool = True,
) -> Tuple[Floats3d, Tuple[Floats3d, Floats3d, Floats3d]]:
Y, (G, C, S) = recurrent_lstm_forward(W, b, h_init, c_init, inputs)
return Y, (G, C, S)
def backprop_recurrent_lstm(
self,
dY: Floats3d,
fwd_state: Tuple[Floats3d, Floats3d, Floats3d],
params: Tuple[Floats2d, Floats1d],
) -> Tuple[Floats3d, Tuple[Floats2d, Floats1d, Floats1d, Floats1d]]:
dCt = self.alloc2f(dY.shape[1], dY.shape[2])
empty_row = self.alloc3f(1, dY.shape[1], dY.shape[2])
# Offset dY by 1
dY = self.xp.vstack((empty_row, dY))
dW, db, dX, dY, dC0 = backprop_recurrent_lstm(dY, dCt, (fwd_state, params))
return dX, (dW, db, dY[0].sum(axis=0), dC0.sum(axis=0))
def maxout(self, X: Floats3d) -> Tuple[Floats2d, Ints2d]:
which = X.argmax(axis=-1, keepdims=False)
return X.max(axis=-1), which
def backprop_maxout(self, dY: Floats2d, which: Ints2d, P: int) -> Floats3d:
dX = self.alloc3f(dY.shape[0], dY.shape[1], P)
for b in range(dY.shape[0]):
for o in range(dY.shape[1]):
dX[b, o, which[b, o]] = dY[b, o]
return dX
def relu(self, X: Floats2d, inplace: bool = False) -> Floats2d:
if not inplace:
return X * (X > 0)
else:
X *= X > 0
return X
def backprop_relu(
self, dY: Floats2d, Y: Floats2d, inplace: bool = False
) -> Floats2d:
if not inplace:
return dY * (Y > 0)
dY *= Y > 0
return dY
def mish(self, X: Floats2d, threshold: float = 20.0) -> Floats2d:
        # Vectorized mish(x) = x * tanh(softplus(x)); above the threshold the
        # function is numerically indistinguishable from x, so those inputs
        # pass through unchanged.
        tmp = X * self.xp.tanh(self.xp.log(1.0 + self.xp.exp(X)))
        return self.xp.where(X >= threshold, X, tmp)
def backprop_mish(
self,
dY: Floats2d,
X: Floats2d,
threshold: float = 20.0,
out: Optional[Floats2d] = None,
) -> Floats2d:
xp = get_array_module(X)
indices = X < threshold
Xsub = X[indices]
dYsub = dY[indices]
omega = 4.0 * (Xsub + 1.0)
omega += 4.0 * xp.exp(2.0 * Xsub)
omega += xp.exp(Xsub) * ((4.0 * Xsub) + 6.0)
delta = 2.0 * xp.exp(Xsub)
delta += xp.exp(2.0 * Xsub)
delta += 2.0
dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta ** 2))
if out is None:
out = xp.zeros(dY.shape, dtype="f")
        # Above the threshold mish(x) ~= x, so the gradient passes straight
        # through; the exact gradient then overwrites the sub-threshold entries.
        out[:] = dY
out[indices] = dXsub
return out
def update_averages(
self, ema: FloatsT, weights: FloatsT, t: int, max_decay: float = 0.9999
) -> None:
# Internals for optimizer
decay = (1.0 + t) / (10.0 + t)
if decay > max_decay:
decay = max_decay
ema -= (1 - decay) * (ema - weights)
def adam(
self,
weights: Floats1d,
gradient: Floats1d,
mom1: Floats1d,
mom2: Floats1d,
beta1: float,
beta2: float,
eps: float,
learn_rate: float,
mod_rate: float = 1.0,
) -> Tuple[Floats1d, Floats1d, Floats1d, Floats1d]:
# Internals for optimizer
mom1 *= beta1
mom2 *= beta2
mom1 += gradient * (1.0 - beta1)
mom2 += gradient * gradient * (1.0 - beta2)
# Here we assume learn rate is calculated by the caller.
# cdef weight_t a_t = learn_rate * sqrt(1-beta2**hp.t) / (1-beta1**hp.t);
weights -= learn_rate * (mom1 / (mod_rate * self.xp.sqrt(mom2) + eps))
return weights, gradient, mom1, mom2
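    # This is the standard Adam update, except that the bias-correction term
    # (the commented a_t formula above) is folded into `learn_rate` by the
    # caller; mom1/mom2 are exponential moving averages of the gradient and
    # its elementwise square.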
def clip_gradient(self, gradient: FloatsT, threshold: float) -> FloatsT:
# Internals for optimizer
xp = get_array_module(gradient)
grad_norm = xp.linalg.norm(gradient)
if grad_norm >= threshold:
gradient *= threshold / grad_norm
return gradient
def logloss(self, y_true: FloatsT, y_pred: FloatsT) -> float:
# Currently not used
log_yp = self.xp.log(y_pred + 1e-8)
loss = (y_true * log_yp) + (1 - y_true) * self.xp.log((1 - y_pred) + 1e-8)
return -loss
def reduce_sum(self, X: Floats2d, lengths: Ints1d) -> Floats2d:
Y = self.alloc2f(lengths.shape[0], X.shape[1])
start = 0
for i, length in enumerate(lengths):
Y[i] = X[start : start + length].sum(axis=0)
start += length
return Y
def reduce_mean(self, X: Floats2d, lengths: Ints1d) -> Floats2d:
Y = self.alloc2f(lengths.shape[0], X.shape[1])
start = 0
for i, length in enumerate(lengths):
if length:
Y[i] = X[start : start + length].mean(axis=0)
start += length
return Y
def reduce_max(self, X: Floats2d, lengths: Ints1d) -> Tuple[Floats2d, Ints2d]:
Y = self.alloc2f(lengths.shape[0], X.shape[1])
which = self.alloc2i(lengths.shape[0], X.shape[1])
start = 0
for i, length in enumerate(lengths):
if length:
which[i] = X[start : start + length].argmax(axis=0)
Y[i] = X[start : start + length].max(axis=0)
start += length
return Y, which
def backprop_reduce_sum(self, d_sums: Floats2d, lengths: Ints1d) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_sums.shape[1])
start = 0
for i, length in enumerate(lengths):
dX[start : start + length] = d_sums[i]
start += length
return dX
def backprop_reduce_mean(self, d_means: Floats2d, lengths: Ints1d) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_means.shape[1])
start = 0
for i, length in enumerate(lengths):
dX[start : start + length] = d_means[i] / length
start += length
return dX
def backprop_reduce_max(
self, d_maxes: Floats2d, which: Ints2d, lengths: Ints1d
) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_maxes.shape[1])
start = 0
        for i, length in enumerate(lengths):
            if length:
                # Send each column's gradient to the row that produced the max.
                dX[start + which[i], self.xp.arange(d_maxes.shape[1])] = d_maxes[i]
            start += length
return dX
def hash(self, ids: Ints1d, seed: int) -> Ints2d:
"""Hash a sequence of 64-bit keys into a table with 4 32-bit keys, using
murmurhash3.
"""
from .numpy_ops import NumpyOps
numpy_ops = NumpyOps()
return self.asarray2i(
numpy_ops.hash(numpy_ops.asarray(ids, dtype="uint64"), seed)
)
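    # Expanding one 64-bit key into four 32-bit keys supports e.g. hashed
    # embeddings, where several hashed rows are summed per id to reduce
    # collisions without a huge table.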
def ngrams(self, n: int, keys: Ints1d) -> Ints1d:
from .numpy_ops import NumpyOps
numpy_ops = NumpyOps()
return self.asarray1i(
numpy_ops.ngrams(n, numpy_ops.asarray(keys, dtype="uint64"))
)
def position_encode(
self, N: int, D: int, period: int = 10000, out: Optional[Floats2d] = None
) -> Floats2d:
# Currently internals only
from .numpy_ops import NumpyOps
numpy_ops = NumpyOps()
return self.asarray2f(numpy_ops.position_encode(N, D, period, out))
def scatter_add(
self, table: FloatsXd, indices: IntsXd, values: FloatsXd
) -> FloatsXd:
        # xp.add.at mutates `table` in place and returns None, so return the
        # updated table explicitly.
        self.xp.add.at(table, indices, values)
        return table
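    # Typical use is accumulating gradients into an embedding table where
    # `indices` can repeat; add.at handles duplicate indices correctly,
    # unlike the buffered `table[indices] += values`.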
def insert_into(self, shape, Xs):
"""Maybe don't need this? Just a quicky to get Jax working."""
output = self.alloc(shape, dtype=Xs[0].dtype)
for i, x in enumerate(Xs):
output[i, : x.shape[0]] = x
return output
# This code is intentionally almost-duplicate with the Jax one. It's kind
# of hard to condition on jax vs not jax without messing up the jax JIT,
# and we'll want to have a more specialised implementation for non-Jax
# versions. But for now this has been tested and works, so we'll just leave
# it as a reference implementation.
"""
LSTM Notation (kind of involved, but made it a lot easier to write)
X: Inputs
Y: Outputs (aka hiddens)
C: Cells
G: Gates (Output of non-linearity, i.e. lstm_gates(X @ W.T)
A: Activations (X @ W.T, before non-linearity)
Imagine we have the input:
batch = [
["apple", "banana", "cantaloupe", "date", "elderberry"],
["aardvark", "bat", "capybara", "dingo", "elephant"]
]
The input variable X will have one vector per word, so X[0, 1] will be banana's
vector, X[0, 1, 0] will be a float, the first element of that vector.
We're computing an output variable Y of shape (nL, nB, nO), so that Y[0, 1] is
the output variable of banana.
A problem with variables for RNNs is keeping the timesteps straight. It's hard
to distinguish the current, previous, and next timesteps. To solve this problem,
we follow the convention that **we are at timestep 3**.
Additionally, the variables for Y and C are offset by one, as the 0th elements
have the initial hiddens and initial cells. So:
t=3
Xt3: The input vectors for 'dingo' and 'date', i.e. X[t]
Yt3: The output vectors for 'dingo' and 'date', i.e. Y[t+1] (Y is offset.)
Ct2: The cells calculated at 'c...', that are the input for 'd...'
Ct3: The cells calculated at 'd...', that are the input for 'e...'
At3: The activations at 'd...'
Gt3: The gates at 'd...'
"""
def recurrent_lstm_forward(W, b, h_init, c_init, X):
xp = get_array_module(W)
nL, nB, nI = X.shape
nO = h_init.shape[0]
# Preallocate these so we can pass them through for loop.
Y = xp.zeros((nL + 1, nB, nO), dtype="f")
G = xp.zeros((nL, nB, nO * 4), dtype="f")
C = xp.zeros((nL + 1, nB, nO), dtype="f")
# Set initial hidden and cell states. The Y and C will be shifted 1,
# so that we can have fewer arrays.
Y[0] = h_init
C[0] = c_init
state = ((W, b, X), (Y, C, G))
for i in range(X.shape[0]):
state = lstm_stepper_forward(i, state)
(W, b, X), (Y, C, G) = state
# Recall that Y and C are both offset by 1. Y[1] is the output for
# X[1], while Y[0] was used as an input for Y[1]. We use
# the S values to backprop the weights, so we need X the previous Ys.
S = xp.concatenate((X, Y[:-1]), axis=-1)
return Y[1:], (G, C, S)
def lstm_stepper_forward(t, state):
(W, b, X), (Y, C, G) = state
# Get the activations for this timestep.
At3 = lstm_weights_forward(X[t], Y[t], W, b)
# The offsets here are a bit unintuitive, because Y and C are 1-offset.
Ct2 = C[t]
Yt3, Ct3, Gt3 = lstm_gates_forward(At3, Ct2)
Y[t + 1] = Yt3
    C[t + 1] = Ct3
G[t] = Gt3
return (W, b, X), (Y, C, G)
def backprop_recurrent_lstm(dY, dCt, fwd_vars):
xp = get_array_module(dY)
(G, C, S), (W, b) = fwd_vars
nL = S.shape[0]
nB = dY.shape[1]
nI = S.shape[2] - dY.shape[2]
# Preallocate these so we can pass them through for loop.
dX = xp.zeros((nL, nB, nI), dtype="f")
dW = xp.zeros(W.shape, dtype="f")
db = xp.zeros(b.shape, dtype="f")
state = (
(dW, db, dX), # The gradi-outs (Write-only)
(dY, dCt), # The gradi-ins (Read and write)
(G, C, S), # Forward state (Read-only)
(W, b), # Params (Read-only)
)
for t in range(nL - 1, -1, -1):
state = backprop_lstm_stepper(t, state)
(dW, db, dX), (dY, dCt), (G, C, S), (W, b) = state
return dW, db, dX, dY, dCt
def backprop_lstm_stepper(t, state):
(dW, db, dX), (dY, dCt3), (G, C, S), (W, b) = state
# Recall, we're at step 3, Y and C are offset by 1. See above.
dYt3 = dY[t + 1]
Ct3 = C[t + 1]
St3 = S[t]
Gt3 = G[t]
Ct2 = C[t]
    dAt3, dCt2 = backprop_lstm_gates(dYt3, dCt3, Gt3, Ct3, Ct2)
dXt3, dYt2, dW3, db3 = backprop_lstm_weights(dAt3, (St3, W, b))
dX[t] = dXt3
dY[t] = dYt2
return (dW + dW3, db + db3, dX), (dY, dCt2), (G, C, S), (W, b)
def lstm_weights_forward(Xt3, Yt2, W, b):
xp = get_array_module(Yt2)
St3 = xp.concatenate((Xt3, Yt2), axis=-1)
At3 = St3 @ W.T + b
return At3
def backprop_lstm_weights(dAt3, fwd_state):
St3, W, b = fwd_state
dW = dAt3.T @ St3
db = dAt3.sum(axis=0)
dSt3 = dAt3 @ W
nO = W.shape[0] // 4
nI = St3.shape[1] - nO
dXt3 = dSt3[:, :nI]
dYt2 = dSt3[:, nI:]
return dXt3, dYt2, dW, db
def lstm_gates_forward(At3, Ct2):
xp = get_array_module(At3)
# hf, hi, ho, hc: Forget, input, output, cell gates.
At3_hf, At3_hi, At3_ho, At3_hc = xp.split(At3, 4, axis=-1)
# Number the steps here, to refer back for backward pass.
# 1. Activations
hf = sigmoid(At3_hf) # 1a
hi = sigmoid(At3_hi) # 1b
ho = sigmoid(At3_ho) # 1c
hc = xp.tanh(At3_hc) # 1d
Ct3 = hf * Ct2 # 2a
Ct3 += hi * hc # 2b
tanhCt3 = xp.tanh(Ct3) # 3a
Yt3 = tanhCt3 * ho # 3b
# We don't need the gradient for this, it's just for backprop calculation.
Gt3 = xp.concatenate((hf, hi, ho, hc), axis=-1)
return Yt3, Ct3, Gt3
def backprop_lstm_gates(
    dYt3: Array2d, dCt3: Array2d, Gt3: Array2d, Ct3: Array2d, Ct2: Array2d
) -> Tuple[Array2d, Array2d]:
# See above for notation. Step numbering refers to forward_lstm_gates
xp = get_array_module(dYt3)
hf, hi, ho, hc = xp.split(Gt3, 4, axis=-1)
tanhCt3 = xp.tanh(Ct3)
# 3b: Yt3 = tanhCt3 * ho
d_ho = dYt3 * tanhCt3
d_tanhCt3 = dYt3 * ho
# 3a: tanhCt3 = tanh(Ct3)
dCt3 += d_tanhCt3 * dtanh(tanhCt3)
# 2b: Ct3 += hi * hc
d_hi = dCt3 * hc
d_hc = dCt3 * hi
# 2a: Ct3 = hf * Ct2
d_hf = dCt3 * Ct2
dCt2 = dCt3 * hf
d_At3_hc = d_hc * dtanh(hc) # 1d
d_At3_ho = d_ho * dsigmoid(ho) # 1c
d_At3_hi = d_hi * dsigmoid(hi) # 1b
d_At3_hf = d_hf * dsigmoid(hf) # 1a
dAt3 = xp.concatenate((d_At3_hf, d_At3_hi, d_At3_ho, d_At3_hc), axis=-1)
return dAt3, dCt2
def sigmoid(X):
xp = get_array_module(X)
return 1.0 / (1.0 + xp.exp(-X))
def dsigmoid(Y: ArrayT) -> ArrayT:
return Y * (1.0 - Y)
def dtanh(Y: ArrayT) -> ArrayT:
return 1 - Y ** 2
| [
"[email protected]"
] | |
e07d63a1fbeffe6c57894e08f9d8cb4e1e015a6f | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_default_security_rules_operations.py | 1845d2fdfbb4b739407b79adfc58bf53747e21ea | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 8,956 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DefaultSecurityRulesOperations(object):
"""DefaultSecurityRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.SecurityRuleListResult"]
"""Gets all default security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_04_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
default_security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.SecurityRule"
"""Get the specified default network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param default_security_rule_name: The name of the default security rule.
:type default_security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'defaultSecurityRuleName': self._serialize.url("default_security_rule_name", default_security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules/{defaultSecurityRuleName}'} # type: ignore
| [
"[email protected]"
] | |
cc5efee86d9bd9204bbc9ff243e80878e33ea5a6 | ae4be4a17468f89e06975a402cddd7dabf692ec9 | /ABC/137/C/source.py | 5da7bff89fd85a546813bb268e62e676e9596f88 | [] | no_license | naru380/AtCoder | 95ae61230d3182dc2a317a77f8e9300c68443199 | 296d071d6a91ea7e061ee3923b5c26b0c7536119 | refs/heads/master | 2020-09-20T02:12:29.405393 | 2020-05-31T09:58:08 | 2020-05-31T09:58:08 | 224,354,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | import itertools
from collections import Counter
import math
N = int(input())
def generate_prime_numbers():
search_range = 150
search_list = [True for i in range(0, search_range+1)]
search_list[0] = False
search_list[1] = False
search_list[2] = True
for i in range(2, search_range+1):
for j in range(i*2, search_range+1, i):
search_list[j] = False
    prime_numbers = [i for i in range(search_range+1) if search_list[i]]
    return prime_numbers[:26]  # one distinct prime per lowercase letter
def combination(n, r):
return math.factorial(n) // math.factorial(r) // math.factorial(n-r)
prime_numbers = generate_prime_numbers()
encoded_strings = []
for i in range(N):
S = input()
encoded_string = 1
for c in S:
char_to_int = ord(c) - ord('a')
encoded_string *= prime_numbers[char_to_int]
encoded_strings.append(encoded_string)
# print(encoded_strings)
ans = 0
# for comb in itertools.combinations(encoded_strings, 2):
# if comb[0] == comb[1]:
# ans += 1
counter = Counter(encoded_strings)
for i in counter.values():
if i > 1:
ans += combination(i, 2)
print(ans)
| [
"[email protected]"
] | |
3b30c93eabcd27038c83049c2ee79ddeb97f9bac | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Binary Search/FirstBadVersion.py | 65537ce9473dd531e132bb495f34504fa9fb26fb | [
"MIT"
] | permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 806 | py | """
LeetCode Problem 278. First Bad Version
Link: https://leetcode.com/problems/first-bad-version/
Written by: Mostofa Adib Shakib
Language: Python
Time Complexity: O(logn)
Space complexity: O(1)
"""
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
first = 0
last = n
while first <= last:
mid = (first+last)//2
if isBadVersion(mid) == False:
first = mid + 1
elif isBadVersion(mid) == True:
if isBadVersion(mid-1) == True:
last = mid - 1
else:
return mid | [
"[email protected]"
] | |
678256f0e9251afdae873f233eb56b60123f7369 | b0c02d7ca86c1ef84af18a8c701702e8bb212b64 | /display-stuff/neopixels/ColorSynthesis/Neopixel Color Synthesis/colorsynthesis1.py | dc3e92458492203c89b292ad1f19f38abeac0e08 | [] | no_license | flashypepo/myMicropython-Examples | 24fa2f372e68742abe0f74913df000dfe64a9e55 | b2b63df865b5ad471b351ca5f279135025859f5d | refs/heads/master | 2021-09-24T18:52:18.083444 | 2018-10-13T11:59:19 | 2018-10-13T11:59:19 | 98,223,412 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | # Micro/CircuitPython NeoPixel Color Synthesis Experiments pt. 1
import machine
import time
import math
import neopixel
NEOPIXEL_PIN = machine.Pin(15, machine.Pin.OUT)
NEOPIXEL_COUNT = 8 * 4 #12
def seconds():
return time.ticks_ms()/1000 # MicroPython code for current seconds
# Setup NeoPixels
pixels = neopixel.NeoPixel(NEOPIXEL_PIN, NEOPIXEL_COUNT)
def blank():
pixels.fill((0,0,0))
pixels.write()
blank()
''' Example 2:
amplitude = 128
frequency = 0.25 # Increase this to speed up, decrease to slow down the pulse.
phase = 0
offset = 128
try:
while True:
red = int(amplitude*math.sin(2*math.pi*frequency*seconds()+phase)+\
offset)
color = (red, 0, 0)
pixels.fill(color)
pixels.write()
print("r={}\tg={}\tb={}".format(*color))
time.sleep(0.1)
except:
blank()
print('done')
#'''
################################################################################
# Example 3:
# Refactor to a functional style. Create a sine wave function on the fly
# so it's easy to add more animations (just make more sine wave functions).
################################################################################
def sine_wave(amplitude, frequency, phase, offset):
return lambda t: amplitude*math.sin(2*math.pi*frequency*t+phase)+offset
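# Each call returns a closure over its own parameters, e.g. a hypothetical
# extra channel: blue_wave = sine_wave(128, 0.5, math.pi / 2, 128).
# red_wave and green_wave below differ in phase by pi (half a period), so one
# channel fades in while the other fades out.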
red_wave = sine_wave(128, 0.25, 0, 128)
green_wave = sine_wave(128, 0.25, math.pi, 128)
try:
while True:
current = seconds()
red = int(red_wave(current))
green = int(green_wave(current))
color = (red, green, 0)
pixels.fill(color)
pixels.write()
print("r={}\tg={}\tb={}".format(*color))
time.sleep(0.1)
except:
blank()
print('done')
| [
"[email protected]"
] | |
78544477f1980b8197bbeb6369a8c22371a2db77 | a6203ce0f7f871ccd8fd341af6254795c938232b | /easy/power-of-two/solution.py | 08541ea6db52d29b1ee3005fc763fdc7559eb622 | [] | no_license | hsuanhauliu/leetcode-solutions | 542590de9b1dd4480bd582850363f71487dd37d0 | c14d8829c95f61ff6691816e8c0de76b9319f389 | refs/heads/master | 2021-03-31T00:31:18.489947 | 2019-10-21T03:51:10 | 2019-10-21T03:51:10 | 124,963,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | class Solution:
def isPowerOfTwo(self, n: int) -> bool:
if n <= 0:
return False
while n != 1:
# keep dividing
if n % 2:
return False
n //= 2
return True | [
"[email protected]"
] | |
fa2426367d7e331041c267f0caa9af5a01f702f0 | 620323fc090cebaf7aca456ff3f7fbbe1e210394 | /psutil_example/get_win_services.py | 26361ecb1fd9c3ae7b2481a9ed2b4502e0765fd2 | [
"CC-BY-4.0"
] | permissive | gil9red/SimplePyScripts | bd2733372728bf9b9f00570e90316fa12116516b | 773c2c9724edd8827a1dbd91694d780e03fcb05a | refs/heads/master | 2023-08-31T04:26:09.120173 | 2023-08-30T17:22:59 | 2023-08-30T17:22:59 | 22,650,442 | 157 | 46 | null | 2023-09-08T17:51:33 | 2014-08-05T16:19:52 | Python | UTF-8 | Python | false | false | 853 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
# pip install psutil
import psutil
from psutil._pswindows import WindowsService
def get_win_services() -> list[WindowsService]:
return list(psutil.win_service_iter())
if __name__ == "__main__":
win_service_list = get_win_services()
print(f"Win service list ({len(win_service_list)}):")
for service in win_service_list:
title = f"{service.name()!r} ({service.display_name()})"
path = (
f"Pid={service.pid()}, name={service.name()!r}, display_name={service.display_name()!r}, "
f"status={service.status()!r}, start_type={service.start_type()!r}"
)
print("Title:", title)
print("Path:", path)
print("Status:", service.status())
print("binpath:", service.binpath())
print()
| [
"[email protected]"
] | |
5f34b64d875d3784f0e1740ec07bff206fac3a41 | 555eb9c234f86911df70188914d45c358c67bb62 | /tensorflow/python/keras/engine/base_layer_utils.py | b97326eea6a28ba9f1e466c1d59a43c7108bba19 | [
"Apache-2.0"
] | permissive | obeshor/tensorflow | 64b99bfec161e8680535104e7e90834b1060c5c3 | 0fd570848f7cd08904907640111d435dcb7fba8a | refs/heads/master | 2020-05-18T09:44:13.516187 | 2019-04-30T20:33:02 | 2019-04-30T21:32:19 | 184,335,557 | 2 | 1 | Apache-2.0 | 2019-04-30T21:43:01 | 2019-04-30T21:43:00 | null | UTF-8 | Python | false | false | 24,570 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains private utilities used mainly by the base Layer class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import threading
import enum
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
_call_context = threading.local()
class CallConvention(enum.Enum):
"""Calling conventions for passing `Layer` inputs to `Layer.call`."""
# The Layer takes inputs as its first argument, named "inputs" for
# compatibility with the signature of Layer.__call__. This is the mode assumed
# for Layers which are not subclassed Models.
EXPLICIT_INPUTS_ARGUMENT = 1
# The Layer takes a single positional argument, not named "inputs". It's
# treated like an "inputs" argument.
SINGLE_POSITIONAL_ARGUMENT = 2
# The Layer has multiple positional arguments to which its inputs should be
# bound.
POSITIONAL_ARGUMENTS_ARE_INPUTS = 3
def create_mean_metric(value, name=None):
# TODO(psv): Remove this import when b/110718070 is fixed.
from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top
from tensorflow.python.keras.distribute import distributed_training_utils # pylint: disable=g-import-not-at-top
metric_obj = metrics_module.Mean(name=name)
return (metric_obj,
distributed_training_utils.call_replica_local_fn(metric_obj, value))
def make_variable(name,
shape=None,
dtype=dtypes.float32,
initializer=None,
trainable=None,
caching_device=None,
validate_shape=True,
constraint=None,
use_resource=None,
collections=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE,
partitioner=None): # pylint: disable=unused-argument
"""Temporary util to create a variable (relies on `variable_scope.variable`).
Some reuse-related technicalities prevent us from using
`variable_scope.get_variable()` directly, so we use a subcomponent
that has fewer constraints (`variable_scope.variable()`).
In the longer term, it seems like a similar "default variable creator" method
should exist in `Trackable` instead. When this happens, we can get
rid of this temporary solution.
TODO(fchollet): remove this method when no longer needed.
Arguments:
name: Variable name.
shape: Variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: Initializer instance (callable).
trainable: Whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
marked as non-trainable. `trainable` defaults to `True` unless
`synchronization` is set to `ON_READ`.
caching_device: Passed to `tf.Variable`.
validate_shape: Passed to `tf.Variable`.
constraint: Constraint instance (callable).
use_resource: Whether to use a `ResourceVariable`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
partitioner: Not handled at this time.
Returns:
Variable instance.
"""
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(
initializer,
(type(init_ops.Initializer), type(init_ops_v2.Initializer))):
initializer = initializer()
init_val = lambda: initializer(shape, dtype=dtype)
variable_dtype = dtype.base_dtype
if use_resource is None:
use_resource = True
# TODO(apassos,rohanj) figure out how to remove collections from here so we
# can remove the V1.
v = tf_variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
collections=collections,
synchronization=synchronization,
aggregation=aggregation)
return v
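# A minimal usage sketch (illustrative only; the variable name and shape below
# are assumptions, not taken from this file):
#
#   kernel = make_variable(
#       'kernel', shape=(4, 2), dtype=dtypes.float32,
#       initializer=init_ops.glorot_uniform_initializer(),
#       trainable=True)
#
# Passing a concrete value instead of a callable initializer is also accepted,
# in which case the value is used directly as the initial value.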
def get_default_graph_uid_map():
# TODO(fchollet): refactor this into backend.
graph = ops.get_default_graph()
name_uid_map = backend.PER_GRAPH_LAYER_NAME_UIDS.get(graph, None)
if name_uid_map is None:
name_uid_map = collections_lib.defaultdict(int)
backend.PER_GRAPH_LAYER_NAME_UIDS[graph] = name_uid_map
return name_uid_map
def unique_layer_name(name, name_uid_map=None, avoid_names=None, namespace='',
zero_based=False):
"""Makes a layer name (or arbitrary string) unique within a TensorFlow graph.
Arguments:
name: String name to make unique.
name_uid_map: An optional defaultdict(int) to use when creating unique
names. If None (default), uses a per-Graph dictionary.
avoid_names: An optional set or dict with names which should not be used. If
None (default) does not avoid any names.
namespace: Gets a name which is unique within the (graph, namespace). Layers
which are not Networks use a blank namespace and so get graph-global
names.
zero_based: If True, name sequences start with no suffix (e.g. "dense",
"dense_1"). If False, naming is one-based ("dense_1", "dense_2").
Returns:
Unique string name.
Example:
```python
_unique_layer_name('dense') # dense_1
_unique_layer_name('dense') # dense_2
```
"""
if name_uid_map is None:
name_uid_map = get_default_graph_uid_map()
if avoid_names is None:
avoid_names = set()
proposed_name = None
while proposed_name is None or proposed_name in avoid_names:
name_key = (namespace, name)
if zero_based:
number = name_uid_map[name_key]
if number:
proposed_name = name + '_' + str(number)
else:
proposed_name = name
name_uid_map[name_key] += 1
else:
name_uid_map[name_key] += 1
proposed_name = name + '_' + str(name_uid_map[name_key])
return proposed_name
def collect_previous_mask(input_tensors):
"""Retrieves the output mask(s) of the previous node.
Arguments:
input_tensors: An arbitrary structure of Tensors.
Returns:
A mask tensor or list of mask tensors.
"""
def _collect_previous_mask(x):
return getattr(x, '_keras_mask', None)
return nest.map_structure(_collect_previous_mask, input_tensors)
def have_all_keras_metadata(tensors):
return all(hasattr(x, '_keras_history') for x in nest.flatten(tensors))
def generate_placeholders_from_shape(shape):
return array_ops.placeholder(shape=shape, dtype=backend.floatx())
def create_keras_history(tensors):
"""Wraps TensorFlow Operations for compatibility with the Functional API.
This method checks to see if a Tensor in `tensors` is missing Keras metadata
and has its origin in a Keras `Input` Layer. If so, this method will replace
the raw TensorFlow Operations that created this tensor with
`TensorFlowOpLayer` instances that create identical operations.
Any Tensors not originating from a Keras `Input` Layer will be treated as
constants when constructing `TensorFlowOpLayer` instances.
Arguments:
tensors: A structure of Tensors, some of which come from raw TensorFlow
operations and need to have Keras metadata assigned to them.
Returns:
keras_tensors: The Tensors found that came from a Keras Layer.
"""
_, created_layers = _create_keras_history_helper(tensors, set(), [])
return created_layers
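# Illustrative scenario (a sketch; the `keras` and `tf` names below are
# assumptions made for the example, not imports of this module):
#
#   inputs = keras.Input(shape=(2,))
#   x = tf.abs(inputs)           # raw TF op, so `x` lacks `_keras_history`
#   create_keras_history([x])    # wraps the abs op in a TensorFlowOpLayer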
def _create_keras_history_helper(tensors, processed_ops, created_layers):
"""Helper method for `create_keras_history`.
Arguments:
tensors: A structure of Tensors for which to create Keras metadata.
processed_ops: Set. TensorFlow operations that have already been wrapped in
`TensorFlowOpLayer` instances.
created_layers: List. The `TensorFlowOpLayer` instances created.
Returns:
Tuple. First element is the updated set of TensorFlow Operations that
have been wrapped in `TensorFlowOpLayer` instances. Second element is
a list of the `TensorFlowOpLayer` instances created.
"""
# Import of `base_layer` needed in order to create `TensorFlowOpLayer`.
# Cannot be imported at top because of circular dependencies.
# TODO(omalleyt): Resolve circular dependency.
from tensorflow.python.keras.engine import base_layer # pylint: disable=g-import-not-at-top
tensor_list = nest.flatten(tensors)
for tensor in tensor_list:
if getattr(tensor, '_keras_history', None) is not None:
continue
op = tensor.op # The Op that created this Tensor.
if op not in processed_ops:
# Recursively set `_keras_history`.
op_inputs = list(op.inputs)
constants = {}
layer_inputs = []
for i, op_input in enumerate(op_inputs):
if uses_keras_history(op_input):
layer_inputs.append(op_input)
else:
# Treat any value not originating from a `keras.Input` as
# a constant. Variables cannot be supported.
if (distribution_strategy_context.in_cross_replica_context() and
not ops.executing_eagerly_outside_functions()):
# In Legacy Graph mode, evaluating here makes Session be
# configured improperly.
constants[i] = op_input
else:
constants[i] = backend.function([], op_input)([])
processed_ops, created_layers = _create_keras_history_helper(
layer_inputs, processed_ops, created_layers)
name = op.name
node_def = op.node_def.SerializeToString()
op_layer = base_layer.TensorFlowOpLayer(
node_def, constants=constants, name=name)
created_layers.append(op_layer)
op_layer._add_inbound_node( # pylint: disable=protected-access
layer_inputs, op.outputs)
processed_ops.update([op])
return processed_ops, created_layers
def needs_keras_history(tensors):
"""Check if any Tensors need to be wrapped in TensorFlowOpLayers.
This will never return True inside a sublayer, because sublayers
do not need to create Keras History. Otherwise, this returns True
if one or more of `tensors` originates from a `keras.Input` and
does not have `_keras_history` set.
Arguments:
tensors: An arbitrary nested structure of Tensors.
Returns:
Bool, whether at least one Tensor needs to be wrapped.
"""
input_tensors = nest.flatten(tensors)
if is_in_call_context() or all(
getattr(tensor, '_keras_history', None) is not None
for tensor in input_tensors):
# KerasHistory already set.
return False
return uses_keras_history(tensors)
def is_in_call_context():
"""Returns true if inside of a model/layer '__call__'."""
return getattr(_call_context, 'in_call', False)
def is_in_frozen_context():
"""Returns if currently executing inside a `call` of a frozen Layer.
A Layer is considered frozen if `layer.trainable=False`.
Returns:
Whether currently inside the `call` of a frozen Layer.
"""
return getattr(_call_context, 'frozen', False)
def uses_keras_history(tensors):
"""Check if at least one Tensor originates from a `keras.Input`.
This is `True` if at least one Tensor has its origin in a `keras.Input`.
Any Tensor that originates from a `keras.Input` will have a dependency
Tensor with a `_keras_history` attribute attached. Tensors that have
already been checked to not originate from a `keras.Input`
are marked as `_keras_history_checked`.
Arguments:
tensors: An arbitrary nested structure of Tensors.
Returns:
Bool, whether at least one Tensor originates from a `keras.Input`.
"""
checked_tensors = set()
tensors_to_check = nest.flatten(tensors)
while tensors_to_check:
new_tensors_to_check = set()
for tensor in tensors_to_check:
if getattr(tensor, '_keras_history_checked', None) is not None:
continue
if getattr(tensor, '_keras_history', None) is not None:
return True
try:
new_tensors_to_check.update(tensor.op.inputs)
except AttributeError:
# In case `tensor` is a Variable created in an Eager context.
pass
checked_tensors.update(tensors_to_check)
tensors_to_check = list(new_tensors_to_check - checked_tensors)
# Mark that these Tensors have been checked once for `_keras_history`,
# and should not be checked again for performance reasons.
mark_checked(tensors)
return False
def mark_checked(tensors):
"""Marks that these Tensors should not be tracked.
This prevents Layers from attempting to create TensorFlowOpLayers
for these Tensors.
Arguments:
tensors: An arbitrary structure of Tensors.
"""
def _mark_checked(tensor):
tensor._keras_history_checked = True # pylint: disable=protected-access
nest.map_structure(_mark_checked, tensors)
@tf_contextlib.contextmanager
def call_context(layer):
"""Scope that marks when we are currently inside a Layer/Model's `call`."""
was_in_call = is_in_call_context()
was_frozen = is_in_frozen_context()
_call_context.in_call = True
if not layer.trainable:
_call_context.frozen = True
try:
yield
finally:
_call_context.in_call = was_in_call
_call_context.frozen = was_frozen
def training_arg_passed_to_call(argspec, args, kwargs):
"""Returns whether a user passed the `training` argument in `__call__`."""
# `argspec.args` starts with ['self', 'inputs']
full_args = dict(zip(argspec.args[2:], args))
full_args.update(kwargs)
return 'training' in full_args
class AutoAddUpdates(object):
"""Automatically track stateful ops with `add_update`.
This context manager is used to automatically add stateful ops to a Layer
or Model's `.updates`. This ensures that stateful ops are run in the Keras
training loop. It also allows for these stateful ops to be disabled by
setting `trainable=False`.
Example:
```
with AutoAddUpdates(layer, inputs) as auto_updates:
outputs = layer.call(inputs)
auto_updates.set_outputs(outputs)
```
Attributes:
layer: Layer or Model instance to add the updates to.
inputs: The inputs to this Layer or Model, to be used for input-conditional
updates.
outputs: The outputs of this Layer or Model.
"""
def __init__(self, layer, inputs):
self.layer = layer
self.inputs = inputs
self.outputs = []
def set_outputs(self, outputs):
if self.outputs:
raise RuntimeError('`set_outputs` should only be called once on an'
'`AutoAddUpdates` instance.')
self.outputs = outputs
def __enter__(self):
# Only run in V2 Function mode.
if (context.executing_eagerly() or
not ops.executing_eagerly_outside_functions()):
return self
self._graph = ops.get_default_graph()
self._num_operations = len(self._graph.get_operations())
return self
def __exit__(self, error_type, unused_value, unused_traceback):
if error_type:
# Allow errors that occurred inside this context manager to pass through
# normally.
return
# Only run in V2 Function mode.
if (context.executing_eagerly() or
not ops.executing_eagerly_outside_functions()):
return
if (self._graph is not ops.get_default_graph() or
self._graph.name != 'keras_graph'):
# Only auto-track updates when the Keras Graph is the only one used.
return
new_operations = self._graph.get_operations()[self._num_operations:]
new_stateful_ops = set()
# pylint: disable=protected-access
for op in new_operations:
# While loop is not supported in general for automatic control
# dependencies.
if control_flow_util.IsInWhileLoop(op):
continue
# Track stateful ops via `add_update`.
is_stateful_op = (
op.type not in self._graph._registered_ops or
auto_control_deps.op_is_stateful(
self._graph._registered_ops[op.type]))
# Ignore ReadVariableOps as they are not needed to be run separately.
# This ensures existing Layers don't get extra updates.
if is_stateful_op and op.type != 'ReadVariableOp':
new_stateful_ops.add(op)
explicit_updates = set(
[u for u in self.layer.updates if not isinstance(u, tuple)])
# pylint: enable=protected-access
# Don't add updates that will already be run by virtue of being consumed by
# other stateful ops or by the Layer's outputs. This ensures that existing
# Layers like `BatchNormalization` continue to return the same values for
# `.update` calls.
minimum_ops = set()
targets = new_stateful_ops.union(
set(nest.flatten(self.outputs)), explicit_updates)
for op in new_stateful_ops:
# Scrub any ops that are consumed by the outputs or other stateful ops.
reachable = tf_utils.get_reachable_from_inputs(op)
if not (targets - {op}).intersection(reachable):
minimum_ops.add(op)
new_stateful_ops = minimum_ops
# Don't double-track updates added via explicitly calling `add_update`.
# Also don't double-track updates already tracked in sublayers.
new_stateful_ops = new_stateful_ops - explicit_updates
# Decide whether to track as input-conditional or unconditional.
input_reachable_ops = tf_utils.get_reachable_from_inputs(
self.inputs, targets=new_stateful_ops)
unconditional_updates = new_stateful_ops - input_reachable_ops
conditional_updates = new_stateful_ops - unconditional_updates
if unconditional_updates:
self.layer.add_update(list(unconditional_updates))
if conditional_updates:
self.layer.add_update(list(conditional_updates), inputs=self.inputs)
def _get_var_read_dtype(input_list, should_cast):
"""Gets the dtype that AutoCastVariables should be read in."""
if should_cast and input_list and input_list[0].dtype.is_floating:
return input_list[0].dtype.base_dtype
else:
return None
def autocast_context_manager(input_list, should_cast):
"""Returns a context manager to autocast AutoCastVariables.
Under this context manager, if `should_cast` is True, AutoCastVariables will
be casted. If `should_cast` is False, AutoCastVariables will not be casted,
which can be used to disable autocasting if nested under another
call to `autocast_context_manager`.
Args:
input_list: The inputs to the layer with the AutoCastVariables.
should_cast: Whether AutoCastVariables should be casted.
Returns:
A context manager to automatically cast AutoCastVariables.
"""
var_read_dtype = _get_var_read_dtype(input_list, should_cast)
return ops.get_default_graph()._enable_auto_casting_variables( # pylint: disable=protected-access
var_read_dtype)
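# Sketch of the intended call pattern (hypothetical caller, for illustration):
#
#   with autocast_context_manager(input_list, should_cast=True):
#       outputs = layer.call(inputs)  # AutoCastVariables read in input dtype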
def is_subclassed(layer):
return (layer.__module__.find('keras.engine') == -1 and
layer.__module__.find('keras.layers') == -1)
def check_graph_consistency(tensor, method):
"""Checks that tensors passed to `add_*` method match the Keras graph.
When one of the `add_*` method is called inside a V2 conditional branch,
the underlying tensor gets created in a FuncGraph managed by control_flow_v2.
We need to raise clear error messages in such cases.
Arguments:
tensor: Tensor to check.
method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}.
Raises:
RuntimeError: In case of an out-of-graph tensor.
"""
if ops.executing_eagerly_outside_functions() and hasattr(tensor, 'graph'):
if isinstance(tensor.graph,
(control_flow_util_v2.CondBranchFuncGraph,
control_flow_util_v2.WhileCondFuncGraph,
control_flow_util_v2.WhileBodyFuncGraph)):
if method == 'add_metric':
bad_example = """
def call(self, inputs, training=None):
if training:
metric = compute_metric(inputs)
self.add_metric(metric, name='my_metric', aggregation='mean')
return inputs
"""
correct_example = """
def call(self, inputs, training=None):
if training:
metric = compute_metric(inputs)
else:
metric = 0.
self.add_metric(metric, name='my_metric', aggregation='mean')
return inputs
"""
elif method == 'add_loss':
bad_example = """
def call(self, inputs, training=None):
if training:
loss = compute_loss(inputs)
self.add_loss(loss)
return inputs
"""
correct_example = """
def call(self, inputs, training=None):
if training:
loss = compute_loss(inputs)
else:
loss = 0.
self.add_loss(loss)
return inputs
"""
else:
bad_example = """
def call(self, inputs, training=None):
if training:
self.add_update(self.w.assign_add(1))
return inputs
"""
correct_example = """
def call(self, inputs, training=None):
if training:
increment = 1
else:
increment = 0
self.add_update(self.w.assign_add(increment))
return inputs
"""
raise RuntimeError(
'You are using the method `{method}` in a control flow branch '
'in your layer, e.g.:\n{bad_example}\n'
'This is not currently supported. '
'You should either use static control flow (`tf.cond`) '
'or move your call to {method} out of the control flow branch, '
'e.g.:\n{correct_example}\n'
'You can also resolve this by marking your layer '
'as dynamic (eager-only) by passing '
'`dynamic=True` to the layer constructor. '
'Any kind of control flow is supported with dynamic layers. '
'Note that using `dynamic=True` requires you '
'to implement static shape inference '
'in the `compute_output_shape(input_shape)` method.'.format(
method=method,
bad_example=bad_example,
correct_example=correct_example))
| [
"[email protected]"
] | |
80ecbb277308e7fb1010e5ec65eb4683e140c3fe | feba3c32aac7f17d8fbaf6ef7bb4d229844f8247 | /machine_learning/clustering/hierarchical_clustering/AgglomerativeClustering/main.py | c44aad4338e74004ce5203e18522385184a3123b | [] | no_license | lisunshine1234/mlp-algorithm-python | d48aa1336ae7c4925a0e30f4f09fa6de21f83d0e | 898359a10f65f16e94f3bb27cc61f3837806ca68 | refs/heads/master | 2023-05-01T11:11:47.465491 | 2021-05-24T13:53:40 | 2021-05-24T13:53:40 | 291,934,886 | 0 | 0 | null | 2021-05-24T13:42:15 | 2020-09-01T08:00:17 | Python | UTF-8 | Python | false | false | 4,604 | py | import numpy as np
import run as r
'''
[id]
145
[name]
AgglomerativeClustering
[input]
x_train training set the training dataset 2-D array required fixed
y_train test set the test dataset 2-D array required fixed
n_clusters number of clusters default 2; the number of clusters to find. It must be None if distance_threshold is not None. Optional integer integer optional fixed
affinity affinity default 'euclidean'; the metric used to compute the linkage. Can be 'euclidean', 'l1', 'l2', 'manhattan', 'cosine' or 'precomputed'. If linkage is 'ward', only 'euclidean' is accepted. If 'precomputed', a distance matrix (instead of a similarity matrix) is needed as input to the fit method. Option: 'euclidean' string optional fixed
memory memory default None; used to cache the output of the tree computation. By default no caching is done. If a string is given, it is the path to the caching directory. Optional integer or string string optional fixed
connectivity connectivity default None; connectivity matrix. Defines for each sample the neighboring samples following a given structure of the data. This can be the connectivity matrix itself, or a callable that transforms the data into a connectivity matrix (such as one derived from kneighbors_graph). Default None, i.e. the hierarchical clustering algorithm is unstructured. Optional array array optional fixed
compute_full_tree compute full tree default 'auto'; stop early the construction of the tree at n_clusters. Note also that when varying the number of clusters and using caching, it may be advantageous to compute the full tree. It must be True if distance_threshold is not None. By default compute_full_tree is 'auto', which is equivalent to True when distance_threshold is not None, or when n_clusters is less than the maximum of 100 and 0.02 * n_samples; otherwise 'auto' is equivalent to False. Options: boolean, 'auto' string optional fixed
linkage linkage criterion default 'ward'; which linkage criterion to use. The linkage criterion determines which distance to use between sets of observations. The algorithm merges the pairs of clusters that minimize this criterion. - 'ward' minimizes the variance of the clusters being merged. - 'average' uses the average of the distances of each observation of the two sets. - 'complete' or 'maximum' linkage uses the maximum distance between all observations of the two sets. - 'single' uses the minimum of the distances between all observations of the two sets. Options: 'ward', 'average', 'single', 'complete' string optional fixed
distance_threshold distance threshold default None; the linkage distance threshold above which clusters will not be merged. If not None, n_clusters must be None and compute_full_tree must be True. Optional float float optional fixed
[output]
n_clusters_ number of clusters the number of clusters found by the algorithm. If distance_threshold=None, it will be equal to the given n_clusters integer
labels_ labels cluster labels for each point 1-D array
n_leaves_ number of leaves number of leaves in the hierarchical tree integer
n_connected_components_ connected components the estimated number of connected components in the graph integer
children_ children_ the children of each non-leaf node. Values less than n_samples correspond to leaves of the tree which are the original samples. A node i greater than or equal to n_samples is a non-leaf node and has children children_[i - n_samples]. Alternatively at the i-th iteration, children[i][0] and children[i][1] are merged to form node n_samples + i 2-D array
[outline]
Agglomerative clustering recursively merges the pair of clusters that minimally increases a given linkage distance.
[describe]
Agglomerative clustering recursively merges the pair of clusters that minimally increases a given linkage distance.
'''
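# A minimal sketch of the underlying scikit-learn call that this wrapper is
# assumed to delegate to through `r.run` (illustrative only):
#
#   from sklearn.cluster import AgglomerativeClustering
#   model = AgglomerativeClustering(n_clusters=2, linkage='ward').fit(x_train)
#   labels = model.labels_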
def main(x_train, y_train,
n_clusters=2, affinity="euclidean", memory=None, connectivity=None, compute_full_tree='auto', linkage='ward', distance_threshold=None
):
if type(x_train) is str:
x_train = eval(x_train)
if type(y_train) is str:
y_train = eval(y_train)
if type(n_clusters) is str:
n_clusters = eval(n_clusters)
if type(connectivity) is str:
connectivity = eval(connectivity)
if type(distance_threshold) is str:
distance_threshold = eval(distance_threshold)
return r.run(x_train=x_train, y_train=y_train, n_clusters=n_clusters,
affinity=affinity,
memory=memory,
connectivity=connectivity,
compute_full_tree=compute_full_tree,
linkage=linkage,
distance_threshold=distance_threshold)
if __name__ == '__main__':
import numpy as np
import json
array = np.loadtxt('D:\\123_2.csv', delimiter=',')
array = array[0:20, :]
y = array[:, -1].tolist()
x = np.delete(array, -1, axis=1).tolist()
array = array.tolist()
back = main(x, y)
print(back)
for i in back:
print(i + ":" + str(back[i]))
json.dumps(back) | [
"[email protected]"
] | |
1d90ee6dc0cce81b7ee6e5ebc395a18ae771e9a8 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/util/navigation/__init__.pyi | 50c892e49919e06c449a9ea0c91e410ecd8bb2e3 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 195 | pyi | from .GoToAddressLabelDialog import GoToAddressLabelDialog as GoToAddressLabelDialog
from .GoToQuery import GoToQuery as GoToQuery
from .GoToServiceImpl import GoToServiceImpl as GoToServiceImpl
| [
"[email protected]"
] | |
a0a362fb3cf297d127447c05947dad5d44f76ce3 | ebee11af2d66615a2c5c97b4dbffcfc142ee40bb | /torchgen/static_runtime/config.py | bfcab625e2e366812c0a4ece160deff367cf4487 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | timgates42/pytorch | e600e945a366223232ff4d4ddcafe659fdefc0cf | a25df29cc4a64bfc75cf3415cb941ae66ef22201 | refs/heads/master | 2023-03-15T17:57:17.622007 | 2022-07-13T17:01:11 | 2022-07-13T20:24:37 | 227,502,054 | 0 | 0 | NOASSERTION | 2019-12-12T02:22:11 | 2019-12-12T02:22:11 | null | UTF-8 | Python | false | false | 13,607 | py | from torchgen.model import NativeFunctionsGroup, NativeFunctionsViewGroup
from typing import Dict, Union
def func_name_base_str(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> str:
if isinstance(g, NativeFunctionsGroup):
return str(g.functional.func.name.name.base)
else:
return str(g.view.root_name)
is_hand_written_ops_ = frozenset(
(
"abs",
"add",
"addmm",
"all",
"any",
"argmin",
"bmm",
"clamp",
"clamp_min",
"cumsum",
"div",
"fmod",
"index_select",
"leaky_relu",
"linear",
"log",
"matmul",
"mul",
"narrow_copy",
"nonzero",
"pow",
"remainder",
"sigmoid",
"sign",
"sub",
"tanh",
"detach",
"expand_as",
"flatten",
"narrow",
"reshape_as",
"select",
"slice",
"softmax",
"split",
"squeeze",
"transpose",
"view",
"where",
)
)
def is_hand_written(g: NativeFunctionsGroup) -> bool:
name_base = func_name_base_str(g)
return name_base in is_hand_written_ops_
def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> None:
assert index == 0 or index == 1
if op_name == "addr":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["vec1"] = "at::rand({6})"
arg_map["vec2"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["vec1"] = "at::rand({22})"
arg_map["vec2"] = "at::rand({22})"
return
if op_name == "mv":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["vec"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["vec"] = "at::rand({22})"
return
if op_name == "addbmm":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
else:
arg_map["self"] = "at::rand({22, 22})"
return
if op_name == "cross":
if index == 0:
arg_map["self"] = "at::rand({3, 3, 3})"
arg_map["other"] = "at::rand({3, 3, 3})"
else:
arg_map["self"] = "at::rand({22, 3, 22})"
arg_map["other"] = "at::rand({22, 3, 22})"
return
if op_name == "take":
if index == 0:
arg_map["index"] = "at::randint(0, 216, {20}, torch::kInt64)"
else:
arg_map["index"] = "at::randint(0, 1000, {100}, torch::kInt64)"
return
if op_name == "take_along_dim":
if index == 0:
arg_map["indices"] = "at::argsort(self0, 1, true)"
else:
arg_map["indices"] = "at::argsort(self1, 1, true)"
return
if op_name == "masked_select":
if index == 0:
arg_map["mask"] = "at::randn({6, 6, 6}) > 0.5"
else:
arg_map["mask"] = "at::rand({22, 22, 22}) > 0.5"
return
if op_name == "orgqr":
if index == 0:
arg_map["input2"] = "at::rand({6, 6})"
else:
arg_map["input2"] = "at::rand({22, 22})"
return
if op_name == "ormqr":
if index == 0:
arg_map["input2"] = "at::rand({6, 6})"
else:
arg_map["input2"] = "at::rand({22, 22})"
return
if op_name == "quantile":
if index == 0:
arg_map["q"] = "at::rand({6})"
arg_map["interpolation"] = '"linear"'
else:
arg_map["q"] = "at::rand({22})"
arg_map["interpolation"] = '"linear"'
return
if op_name == "nanquantile":
if index == 0:
arg_map["q"] = "at::rand({6})"
arg_map["interpolation"] = '"linear"'
else:
arg_map["q"] = "at::rand({22})"
arg_map["interpolation"] = '"linear"'
return
if op_name == "multi_margin_loss":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
arg_map["weight"] = "at::rand({22})"
return
if op_name == "multilabel_margin_loss":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["target"] = "at::randint(6, {6, 6}, torch::kInt64)"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["target"] = "at::randint(22, {22, 22}, torch::kInt64)"
return
if op_name == "nll_loss":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
arg_map["weight"] = "at::rand({22})"
return
if op_name == "nll_loss2d":
if index == 0:
arg_map["self"] = "at::rand({6, 6, 6, 6})"
arg_map["target"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22, 22, 22})"
arg_map["target"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
arg_map["weight"] = "at::rand({22})"
return
if op_name in (
"fft_fft",
"fft_ifft",
"fft_rfft",
"fft_irfft",
"fft_hfft",
"fft_ihfft",
):
arg_map["norm"] = '"forward"'
return
if op_name == "linalg_tensorinv":
if index == 0:
arg_map["self"] = "at::rand({6, 6, 6, 6})"
arg_map["ind"] = "2"
else:
arg_map["self"] = "at::rand({22, 22, 22, 22})"
arg_map["ind"] = "2"
return
if op_name == "addmv":
if index == 0:
arg_map["self"] = "at::rand({2})"
arg_map["mat"] = "at::rand({2, 2})"
arg_map["vec"] = "at::rand({2})"
else:
arg_map["self"] = "at::rand({35})"
arg_map["mat"] = "at::rand({35, 35})"
arg_map["vec"] = "at::rand({35})"
return
if op_name == "acosh":
if index == 0:
arg_map["self"] = "at::rand({2, 2, 2}) + at::ones({2, 2, 2})"
else:
arg_map["self"] = "at::rand({5, 5, 5}) + at::ones({5, 5, 5})"
return
if op_name == "adaptive_max_pool2d_backward":
if index == 0:
arg_map["grad_output"] = "at::randint(-3, 2, {2,2,2})"
arg_map["self"] = "at::randint(-3, 2, {2,2,2})"
arg_map["indices"] = "at::randint(0, 1, {2,2,2}, at::kLong)"
else:
arg_map["grad_output"] = "at::randint(-3, 3, {3,3,3})"
arg_map["self"] = "at::randint(-3, 2, {3,3,3})"
arg_map["indices"] = "at::randint(0, 1, {3,3,3}, at::kLong)"
return
if op_name == "adaptive_max_pool3d_backward":
if index == 0:
arg_map["grad_output"] = "at::randint(-3, 2, {2,2,2,2})"
arg_map["self"] = "at::randint(-3, 2, {2,2,2,2})"
arg_map["indices"] = "at::randint(0, 1, {2,2,2,2}, at::kLong)"
else:
arg_map["grad_output"] = "at::randint(-3, 3, {3,3,3,3})"
arg_map["self"] = "at::randint(-3, 2, {3,3,3,3})"
arg_map["indices"] = "at::randint(0, 1, {3,3,3,3}, at::kLong)"
return
if op_name == "gather":
if index == 0:
arg_map["self"] = "at::randint(1, 100, {2,2,2}, at::kInt)"
arg_map["dim"] = "1"
arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
arg_map["sparse_grad"] = "false"
else:
arg_map["self"] = "at::randint(1, 100, {5,5,5}, at::kInt)"
arg_map["dim"] = "1"
arg_map["index"] = "at::randint(0, 4, {5,5,5}, torch::kInt64)"
arg_map["sparse_grad"] = "false"
return
if op_name == "gelu":
if index == 0:
arg_map["self"] = "at::rand({6, 6, 6})"
arg_map["approximate"] = '"tanh"'
else:
arg_map["self"] = "at::rand({22, 22, 22})"
arg_map["approximate"] = '"tanh"'
return
if op_name == "gelu_backward":
if index == 0:
arg_map["grad_output"] = "at::rand({6, 6, 6})"
arg_map["self"] = "at::rand({6, 6, 6})"
arg_map["approximate"] = '"tanh"'
else:
arg_map["grad_output"] = "at::rand({22, 22, 22})"
arg_map["self"] = "at::rand({22, 22, 22})"
arg_map["approximate"] = '"tanh"'
return
if op_name == "index_add":
if index == 0:
arg_map["self"] = "at::rand({2})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 1, {2}, at::kInt)"
arg_map["source"] = "at::rand({2})"
arg_map["alpha"] = "2"
else:
arg_map["self"] = "at::rand({16})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 10, {16}, at::kInt)"
arg_map["source"] = "at::rand({16})"
arg_map["alpha"] = "2"
return
if op_name == "index_copy":
if index == 0:
arg_map["self"] = "at::rand({2})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 1, {2}, at::kLong)"
arg_map["source"] = "at::rand({2})"
else:
arg_map["self"] = "at::rand({32})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 10, {32}, at::kLong)"
arg_map["source"] = "at::rand({32})"
return
if op_name == "linalg_cross":
if index == 0:
arg_map["self"] = "at::rand({6, 3, 6})"
arg_map["other"] = "at::rand({6, 3, 6})"
arg_map["dim"] = "1"
else:
arg_map["self"] = "at::rand({22, 3, 22})"
arg_map["other"] = "at::rand({22, 3, 22})"
arg_map["dim"] = "1"
return
if op_name == "nll_loss_backward":
if index == 0:
arg_map["grad_output"] = "at::rand({})"
arg_map["self"] = "at::rand({6})"
arg_map["target"] = "at::randint(0, 5, {6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
arg_map["reduction"] = "1"
arg_map["ignore_index"] = "1"
arg_map["total_weight"] = "at::rand({})"
else:
arg_map["grad_output"] = "at::rand({})"
arg_map["self"] = "at::rand({36})"
arg_map["target"] = "at::randint(0, 11, {36}, torch::kInt64)"
arg_map["weight"] = "at::rand({36})"
arg_map["reduction"] = "1"
arg_map["ignore_index"] = "1"
arg_map["total_weight"] = "at::rand({})"
return
if op_name in ["scatter", "scatter_add", "_scatter_reduce"]:
if index == 0:
arg_map["self"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
arg_map["src"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
else:
arg_map["self"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
arg_map["index"] = "at::randint(0, 1, {5,5,5}, torch::kInt64)"
arg_map["src"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
if "reduce" in arg_map:
arg_map["reduce"] = '"sum"' if op_name == "_scatter_reduce" else '"add"'
return
if op_name == "scatter_reduce":
arg_map["reduce"] = '"mean"'
if index == 0:
arg_map["index"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
else:
arg_map["index"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
return
if op_name == "special_zeta":
if index == 0:
arg_map["self"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
arg_map["other"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
else:
arg_map["self"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
arg_map["other"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
return
if op_name == "_convert_indices_from_csr_to_coo":
if index == 0:
arg_map["crow_indices"] = "torch::tensor({1}, torch::kInt32)"
arg_map["col_indices"] = "torch::tensor({0, 1, 0}, torch::kInt32)"
arg_map["out_int32"] = "false"
else:
arg_map["crow_indices"] = "torch::tensor({0}, torch::kInt32)"
arg_map[
"col_indices"
] = "torch::tensor({0, 1, 0, 2, 1, 2, 0, 1, 0, 2, 1, 2}, torch::kInt32)"
arg_map["out_int32"] = "false"
return
if op_name == "_convert_indices_from_coo_to_csr":
if index == 0:
arg_map["self"] = "at::randint(0, 3, {2}, at::kInt)"
arg_map["size"] = "10"
arg_map["out_int32"] = "false"
else:
arg_map["self"] = "at::randint(0, 3, {12}, at::kInt)"
arg_map["size"] = "24"
arg_map["out_int32"] = "false"
return
if op_name in ("diagonal", "linalg_diagonal"):
arg_map["offset"] = "0"
arg_map["dim0"] = "1"
arg_map["dim1"] = "2"
return
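# Sketch of how a code generator might consume these overrides (the driver
# below is hypothetical, for illustration only):
#
#   arg_map = {"self": "at::rand({6, 6})", "dim": "1"}
#   override_test_values(arg_map, "gather", 0)
#   # arg_map now holds the hand-tuned C++ initializers for the small test case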
| [
"[email protected]"
] | |
6f7c3eebdf06407cee5d8e9e62976c7a454ff836 | e3a25b40812b6b70f10b52a6f66f9348dcc251a6 | /algorithm/0402codeAD/구슬고르기복습.py | ae0fd9e3bf77bee8c5bdacf9d2e3d4790c8f7305 | [] | no_license | yoonwoo123/python101 | 75643cb5dcf411c9ddcf988bf09bb88e4523206c | 637dce64a6320a6f46eb941e33e8e9f6ee41c910 | refs/heads/master | 2020-04-14T11:30:43.018126 | 2019-07-25T08:28:31 | 2019-07-25T08:28:31 | 163,815,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,620 | py | import sys
sys.stdin = open("구슬고르기_input.txt")
# Teacher Kim Eun-kyung's code
def DFS1(n): # permutation with repetition
if n>N:
for i in range(1, N+1): print(arr[i], end=' ')
print()
return
for i in range(1, 7):
arr[n]=i
DFS1(n+1)
def DFS3(n): # permutation
if n>N:
for i in range(1, N+1): print(arr[i], end=' ')
print()
return
for i in range(1, 7):
if chk[i]:continue
        chk[i]=1 # permutations must mark the chosen value as used
arr[n]=i
DFS3(n+1)
        chk[i]=0 # and unmark it again when backtracking
def DFS2(n, start): # combination with repetition
if n>N:
for i in range(1, N+1): print(arr[i], end=' ')
print()
return
    for i in range(start, 7): # the loop begins at start
arr[n]=i
        DFS2(n+1, i) # pass i, not start, as the new start
def DFS4(n, start): # combination
if n>N:
for i in range(1, N+1): print(arr[i], end=' ')
print()
return
for i in range(start, 7):
arr[n]=i
        DFS4(n+1, i+1) # for combinations pass i + 1, not i (important!)
#main---------------------------------
N, M = map(int, input().split())
arr =[0] * (N+1)
chk = [0] * 7
if M ==1: DFS1(1)
elif M ==3 : DFS3(1)
elif M == 2: DFS2(1, 1)
elif M ==4: DFS4(1,1)
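# Worked example of the output sizes for N = 2 (a reference sketch):
#   DFS1 (permutation with repetition): 6^2 = 36 lines
#   DFS3 (permutation):                 6 * 5 = 30 lines
#   DFS2 (combination with repetition): C(7, 2) = 21 lines
#   DFS4 (combination):                 C(6, 2) = 15 lines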
# def ovperm(n, k):
# if n == k:
# for g in p:
# print(g, end=" ")
# print()
# else:
# for i in range(k, n):
# a[i], a[k] = a[k], a[i]
# p[k] = a[i]
# perm(n, k+1)
# # perm(n-1, k+1)
# a[i], a[k] = a[k], a[i]
#
# def DFS(no): # with chk it is a permutation; without chk, a permutation with repetition
# if no >= N:
# for i in range(N):
# print(b[i], end=" ")
# print()
# return
# for i in range(6):
#         # if chk[i]:continue # continue if 1, proceed if 0
# # chk[i] = 1
# b[no] = a[i]
# DFS(no + 1)
# # chk[i] = 0
#
# def comb(no):
# if no >= N:
# for i in range(N):
# print(b[i], end=" ")
# print()
# return
# b[no] = a[no]
# comb(no + 1)
# b[no] = 0
# comb(no + 1)
#
# # def combs(no, start): # every case of putting bead a[no] into the box or leaving it out
# # for i in range(N): print(b[i], end=" ")
# # print()
# # if no >= N or start >= N:
# # return
# # for i in range(start, N):
# # b[no] = a[i]
# # combs(no+1, i+1)
# # b[no] = 0
#
# N = int(input())
# a = [n for n in range(1, 7)]
# b = [0] * N
# chk = [0] * N
# # DFS(0)
# # comb(0)
# DFS(0)
| [
"[email protected]"
] | |
8f5b53674caa26cd827c4943842f96a981027ade | 386a5b505d77c9798aaab78495d0f00c349cf660 | /python/function/harmonic.py | f23e439bde8eccf7c61bf23d64a8e3c28998c89d | [] | no_license | namratarane20/MachineLearning | 2da2c87217618d124fd53f607c20641ba44fb0b7 | b561cc74733b655507242cbbf13ea09a2416b9e2 | refs/heads/master | 2023-01-20T18:54:15.662179 | 2020-03-09T14:12:44 | 2020-03-09T14:12:44 | 237,597,461 | 0 | 0 | null | 2023-01-05T12:37:12 | 2020-02-01T10:22:20 | Python | UTF-8 | Python | false | false | 431 | py | #this program is used print the nth harmonic value
from data import functional
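# For reference, a minimal sketch of what `functional.harmonic` is assumed to
# compute (the real implementation lives in the external `data` module):
#
#   def harmonic(n):
#       # H(n) = 1 + 1/2 + ... + 1/n
#       print(sum(1 / k for k in range(1, n + 1)))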
try:
value = int(input("enter the value: "))
if value > 0: # if value is more than 0 it will run the method
functional.harmonic(value)
else:print("enter more than 0")
except ValueError: # if is not numerical value it will throw the error
print("enter the proper input") | [
"[email protected]"
] | |
b97cffb7e0a43919de823cb6cf823479aa0bc268 | a2ee667a402d821831ce1532c3a2e62305624388 | /extras/sample_site/sample_site/urls.py | b391adc051ec60ccb38c358017504169712765ab | [
"MIT"
] | permissive | cltrudeau/django-flowr | 9c1c7c8a43d881f962e8dd58ca424daa3ee1348a | ea2d69fda94d1998f48301954f8dc69f0b553553 | refs/heads/master | 2023-07-05T20:28:05.370538 | 2023-06-29T19:36:41 | 2023-06-29T19:36:41 | 40,761,586 | 3 | 0 | MIT | 2022-12-26T19:50:47 | 2015-08-15T13:37:23 | Python | UTF-8 | Python | false | false | 185 | py | from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^flowr/', include('flowr.urls')),
]
| [
"[email protected]"
] | |
b4be61d8b86b193478f3cf286e713cde26bb27d9 | 7e7a1a1c7f5a2069b50b90b247d89faef17b7eef | /test/unit/test_make.py | 32d6c20acbcdafa123544c60d5ce8704b4b77154 | [
"BSD-3-Clause"
] | permissive | JulianVolodia/bfg9000 | e1d13e07ef43577ce871cbdf28d7854eaad9985e | c04867cd7fc4861bc67fe38f9ca47ee6cc43edef | refs/heads/master | 2021-01-11T12:16:38.842893 | 2016-12-11T21:16:52 | 2016-12-12T01:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,162 | py | import os
import unittest
from six.moves import cStringIO as StringIO
from bfg9000 import path
from bfg9000 import safe_str
from bfg9000.backends.make.syntax import *
from bfg9000.platforms import platform_name
esc_colon = ':' if platform_name() == 'windows' else '\\:'
def quoted(s):
return "'" + s + "'"
class TestMakeWriter(unittest.TestCase):
# strings
def test_write_string_target(self):
out = Writer(StringIO())
out.write('foo: $bar|baz,quux', Syntax.target)
self.assertEqual(out.stream.getvalue(),
'foo' + esc_colon + '\\ $$bar|baz,quux')
def test_write_string_dependency(self):
out = Writer(StringIO())
out.write('foo: $bar|baz,quux', Syntax.dependency)
self.assertEqual(out.stream.getvalue(),
'foo' + esc_colon + '\\ $$bar\\|baz,quux')
def test_write_string_function(self):
out = Writer(StringIO())
out.write('foo: $bar|baz,quux', Syntax.function)
self.assertEqual(out.stream.getvalue(), quoted('foo: $$bar|baz$,quux'))
def test_write_string_shell(self):
out = Writer(StringIO())
out.write('foo: $bar|baz,quux', Syntax.shell)
self.assertEqual(out.stream.getvalue(), quoted('foo: $$bar|baz,quux'))
def test_write_string_clean(self):
out = Writer(StringIO())
out.write('foo: $bar|baz,quux', Syntax.clean)
self.assertEqual(out.stream.getvalue(), 'foo: $$bar|baz,quux')
# escaped strings
def test_write_escaped_string_target(self):
out = Writer(StringIO())
out.write(safe_str.escaped_str('foo: $bar|baz,quux'), Syntax.target)
self.assertEqual(out.stream.getvalue(), 'foo: $bar|baz,quux')
def test_write_escaped_string_dependency(self):
out = Writer(StringIO())
out.write(safe_str.escaped_str('foo: $bar|baz,quux'),
Syntax.dependency)
self.assertEqual(out.stream.getvalue(), 'foo: $bar|baz,quux')
def test_write_escaped_string_function(self):
out = Writer(StringIO())
out.write(safe_str.escaped_str('foo: $bar|baz,quux'), Syntax.function)
self.assertEqual(out.stream.getvalue(), 'foo: $bar|baz,quux')
def test_write_escaped_string_shell(self):
out = Writer(StringIO())
out.write(safe_str.escaped_str('foo: $bar|baz,quux'), Syntax.shell)
self.assertEqual(out.stream.getvalue(), 'foo: $bar|baz,quux')
def test_write_escaped_string_clean(self):
out = Writer(StringIO())
out.write(safe_str.escaped_str('foo: $bar|baz,quux'), Syntax.clean)
self.assertEqual(out.stream.getvalue(), 'foo: $bar|baz,quux')
# jbos
def test_write_jbos_target(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
out.write(s, Syntax.target)
self.assertEqual(out.stream.getvalue(), '$$foo$bar')
def test_write_jbos_dependency(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
out.write(s, Syntax.dependency)
self.assertEqual(out.stream.getvalue(), '$$foo$bar')
def test_write_jbos_function(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
out.write(s, Syntax.function)
self.assertEqual(out.stream.getvalue(), quoted('$$foo') + '$bar')
def test_write_jbos_shell(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
out.write(s, Syntax.shell)
self.assertEqual(out.stream.getvalue(), quoted('$$foo') + '$bar')
def test_write_jbos_clean(self):
out = Writer(StringIO())
s = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
out.write(s, Syntax.clean)
self.assertEqual(out.stream.getvalue(), '$$foo$bar')
# paths
def test_write_path_target(self):
out = Writer(StringIO())
out.write(path.Path('foo', path.Root.srcdir), Syntax.target)
self.assertEqual(out.stream.getvalue(),
os.path.join('$(srcdir)', 'foo'))
def test_write_path_dependency(self):
out = Writer(StringIO())
out.write(path.Path('foo', path.Root.srcdir), Syntax.dependency)
self.assertEqual(out.stream.getvalue(),
os.path.join('$(srcdir)', 'foo'))
def test_write_path_function(self):
out = Writer(StringIO())
out.write(path.Path('foo', path.Root.srcdir), Syntax.function)
self.assertEqual(out.stream.getvalue(),
quoted(os.path.join('$(srcdir)', 'foo')))
def test_write_path_shell(self):
out = Writer(StringIO())
out.write(path.Path('foo', path.Root.srcdir), Syntax.shell)
self.assertEqual(out.stream.getvalue(),
quoted(os.path.join('$(srcdir)', 'foo')))
def test_write_path_clean(self):
out = Writer(StringIO())
out.write(path.Path('foo', path.Root.srcdir), Syntax.clean)
self.assertEqual(out.stream.getvalue(),
os.path.join('$(srcdir)', 'foo'))
| [
"[email protected]"
] | |
858c51c8c9f563c0c5054b8d8466a2f7140398c7 | 52d9c6d005b2e91f489fdd817059b1217efd711d | /_downloads/8591a6f0671d02c692445320b45c6776/date_demo_rrule.py | 56d2ebf97b135046b79231488af7e5385b733588 | [] | no_license | yuhaihaiyu/matplotlib.github.com | 2a785654d4a0e4a9b6d1876b0aae96b6b5d20fc5 | fbe748c706e92f9ccb660eab656deaebe179a6af | refs/heads/master | 2023-07-31T08:04:16.716267 | 2021-10-02T06:00:49 | 2021-10-02T06:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | ../../stable/_downloads/8591a6f0671d02c692445320b45c6776/date_demo_rrule.py | [
"[email protected]"
] | |
751baeeaf78e31e7c30ff0263dce2e8a7717fb44 | 48517a9b7ec7b0f0bf0a03291b7d1e3def751c0a | /Pibow/corner_to_corner_3_v2.py | 4b9f958fcb0bdf6453d970f0e489ffbcd7e54229 | [
"MIT"
] | permissive | Breakfast-for-Pigeons/Unicorn-HAT | 1ae033bf11c05b9cc739b1eacfc77665506e0bc8 | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | refs/heads/master | 2021-06-06T12:22:48.162031 | 2020-10-22T17:31:51 | 2020-10-22T17:31:51 | 74,648,524 | 1 | 0 | null | 2018-10-02T17:37:31 | 2016-11-24T07:28:23 | Python | UTF-8 | Python | false | false | 7,514 | py | #!/usr/bin/python3
"""
Corner to Corner 3, version 2 - Pibow
Moves a square from the lower right corner to the upper left corner.
Instead of cycling through all the colors, a specific color must be sent
to the function as an argument.
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from time import sleep
import unicornhat
from bfp_unicornhat import print_header
from bfp_unicornhat import stop
########################################################################
# Import Variables #
########################################################################
from bfp_unicornhat import C1
from bfp_unicornhat import C2
from bfp_unicornhat import C3
from bfp_unicornhat import C4
from bfp_unicornhat import C5
from bfp_unicornhat import C6
from bfp_unicornhat import C7
from bfp_unicornhat import C8
########################################################################
# Functions #
########################################################################
def corner_to_corner_3_v2(color):
"""
Moves a square from the lower right corner to the upper left corner.
Arguments:
    This function takes an RGB color tuple as its only argument.
"""
sleep_speed = 0.1
off = (0, 0, 0)
unicornhat.set_pixel(7, 7, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(6, 7, color)
unicornhat.set_pixel(6, 6, color)
unicornhat.set_pixel(7, 6, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(5, 7, color)
unicornhat.set_pixel(5, 6, color)
unicornhat.set_pixel(5, 5, color)
unicornhat.set_pixel(6, 5, color)
unicornhat.set_pixel(7, 5, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(4, 7, color)
unicornhat.set_pixel(4, 6, color)
unicornhat.set_pixel(4, 5, color)
unicornhat.set_pixel(4, 4, color)
unicornhat.set_pixel(5, 4, color)
unicornhat.set_pixel(6, 4, color)
unicornhat.set_pixel(7, 4, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(3, 7, color)
unicornhat.set_pixel(3, 6, color)
unicornhat.set_pixel(3, 5, color)
unicornhat.set_pixel(3, 4, color)
unicornhat.set_pixel(3, 3, color)
unicornhat.set_pixel(4, 3, color)
unicornhat.set_pixel(5, 3, color)
unicornhat.set_pixel(6, 3, color)
unicornhat.set_pixel(7, 3, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(2, 7, color)
unicornhat.set_pixel(2, 6, color)
unicornhat.set_pixel(2, 5, color)
unicornhat.set_pixel(2, 4, color)
unicornhat.set_pixel(2, 3, color)
unicornhat.set_pixel(2, 2, color)
unicornhat.set_pixel(3, 2, color)
unicornhat.set_pixel(4, 2, color)
unicornhat.set_pixel(5, 2, color)
unicornhat.set_pixel(6, 2, color)
unicornhat.set_pixel(7, 2, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(1, 7, color)
unicornhat.set_pixel(1, 6, color)
unicornhat.set_pixel(1, 5, color)
unicornhat.set_pixel(1, 4, color)
unicornhat.set_pixel(1, 3, color)
unicornhat.set_pixel(1, 2, color)
unicornhat.set_pixel(1, 1, color)
unicornhat.set_pixel(2, 1, color)
unicornhat.set_pixel(3, 1, color)
unicornhat.set_pixel(4, 1, color)
unicornhat.set_pixel(5, 1, color)
unicornhat.set_pixel(6, 1, color)
unicornhat.set_pixel(7, 1, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 7, color)
unicornhat.set_pixel(0, 6, color)
unicornhat.set_pixel(0, 5, color)
unicornhat.set_pixel(0, 4, color)
unicornhat.set_pixel(0, 3, color)
unicornhat.set_pixel(0, 2, color)
unicornhat.set_pixel(0, 1, color)
unicornhat.set_pixel(0, 0, color)
unicornhat.set_pixel(1, 0, color)
unicornhat.set_pixel(2, 0, color)
unicornhat.set_pixel(3, 0, color)
unicornhat.set_pixel(4, 0, color)
unicornhat.set_pixel(5, 0, color)
unicornhat.set_pixel(6, 0, color)
unicornhat.set_pixel(7, 0, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(7, 0, off)
unicornhat.set_pixel(7, 1, off)
unicornhat.set_pixel(7, 2, off)
unicornhat.set_pixel(7, 3, off)
unicornhat.set_pixel(7, 4, off)
unicornhat.set_pixel(7, 5, off)
unicornhat.set_pixel(7, 6, off)
unicornhat.set_pixel(7, 7, off)
unicornhat.set_pixel(6, 7, off)
unicornhat.set_pixel(5, 7, off)
unicornhat.set_pixel(4, 7, off)
unicornhat.set_pixel(3, 7, off)
unicornhat.set_pixel(2, 7, off)
unicornhat.set_pixel(1, 7, off)
unicornhat.set_pixel(0, 7, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(6, 0, off)
unicornhat.set_pixel(6, 1, off)
unicornhat.set_pixel(6, 2, off)
unicornhat.set_pixel(6, 3, off)
unicornhat.set_pixel(6, 4, off)
unicornhat.set_pixel(6, 5, off)
unicornhat.set_pixel(6, 6, off)
unicornhat.set_pixel(5, 6, off)
unicornhat.set_pixel(4, 6, off)
unicornhat.set_pixel(3, 6, off)
unicornhat.set_pixel(2, 6, off)
unicornhat.set_pixel(1, 6, off)
unicornhat.set_pixel(0, 6, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(5, 0, off)
unicornhat.set_pixel(5, 1, off)
unicornhat.set_pixel(5, 2, off)
unicornhat.set_pixel(5, 3, off)
unicornhat.set_pixel(5, 4, off)
unicornhat.set_pixel(5, 5, off)
unicornhat.set_pixel(4, 5, off)
unicornhat.set_pixel(3, 5, off)
unicornhat.set_pixel(2, 5, off)
unicornhat.set_pixel(1, 5, off)
unicornhat.set_pixel(0, 5, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(4, 0, off)
unicornhat.set_pixel(4, 1, off)
unicornhat.set_pixel(4, 2, off)
unicornhat.set_pixel(4, 3, off)
unicornhat.set_pixel(4, 4, off)
unicornhat.set_pixel(3, 4, off)
unicornhat.set_pixel(2, 4, off)
unicornhat.set_pixel(1, 4, off)
unicornhat.set_pixel(0, 4, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(3, 0, off)
unicornhat.set_pixel(3, 1, off)
unicornhat.set_pixel(3, 2, off)
unicornhat.set_pixel(3, 3, off)
unicornhat.set_pixel(2, 3, off)
unicornhat.set_pixel(1, 3, off)
unicornhat.set_pixel(0, 3, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(2, 0, off)
unicornhat.set_pixel(2, 1, off)
unicornhat.set_pixel(2, 2, off)
unicornhat.set_pixel(1, 2, off)
unicornhat.set_pixel(0, 2, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(1, 0, off)
unicornhat.set_pixel(1, 1, off)
unicornhat.set_pixel(0, 1, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 0, off)
unicornhat.show()
sleep(sleep_speed)
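# The hand-unrolled sequence above can also be written with two short loops.
# The function below is an illustrative, behavior-equivalent sketch (its name
# is an addition and is not used elsewhere in this module):
def corner_to_corner_3_v2_loops(color):
    """
    Loop-based equivalent of corner_to_corner_3_v2: step k draws (and later
    erases) the L-shaped band k pixels away from the lower right corner.
    """
    sleep_speed = 0.1
    off = (0, 0, 0)
    for k in range(8):  # grow the square out of (7, 7)
        for i in range(7 - k, 8):
            unicornhat.set_pixel(7 - k, i, color)
            unicornhat.set_pixel(i, 7 - k, color)
        unicornhat.show()
        sleep(sleep_speed)
    for k in range(8):  # sweep the square off toward (0, 0)
        for i in range(8 - k):
            unicornhat.set_pixel(7 - k, i, off)
            unicornhat.set_pixel(i, 7 - k, off)
        unicornhat.show()
        sleep(sleep_speed)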
if __name__ == '__main__':
COLORS = [C1, C2, C3, C4, C5, C6, C7, C8]
try:
# STEP01: Print header
print_header()
# STEP02: Print instructions in white text
print("\033[1;37;40mPress Ctrl-C to stop the program.")
# STEP03:
for COLOR in COLORS:
corner_to_corner_3_v2(COLOR)
# STEP04: Exit the program.
stop()
except KeyboardInterrupt:
stop()
| [
"[email protected]"
] | |
4a625cc49e2d484363ea090f357a0e45dc2e536a | 9e28200b71d43de1e122a964e88f1b547bfde465 | /question_leetcode/702.py | e39791835a7143e971e1b37e879656148c9a064b | [] | no_license | paul0920/leetcode | 6f8a7086eefd3e9bccae83752ef41cbfee1acaea | 474886c5c43a6192db2708e664663542c2e39548 | refs/heads/master | 2023-08-19T14:10:10.494355 | 2021-09-16T20:26:50 | 2021-09-16T20:26:50 | 290,560,326 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | # """
# This is ArrayReader's API interface.
# You should not implement it, or speculate about its implementation
# """
# class ArrayReader(object):
# def get(self, index):
# """
# :type index: int
# :rtype int
# """
class Solution(object):
def search(self, reader, target):
"""
:type reader: ArrayReader
:type target: int
:rtype: int
"""
kth = 1
while reader.get(kth - 1) < target:
kth *= 2
left = 0
right = kth
while left + 1 < right:
mid = left + (right - left) // 2
if reader.get(mid) < target:
left = mid
else:
right = mid
if reader.get(left) == target:
return left
if reader.get(right) == target:
return right
return -1
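# A minimal local test harness (the ArrayReader stub below is a hypothetical
# stand-in for LeetCode's hidden API, for illustration only):
#
#   class MockArrayReader(object):
#       def __init__(self, nums):
#           self.nums = nums
#       def get(self, index):
#           return self.nums[index] if index < len(self.nums) else 2 ** 31 - 1
#
#   Solution().search(MockArrayReader([-1, 0, 3, 5, 9, 12]), 9)  # returns 4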
| [
"[email protected]"
] | |
fec9af424e5192af1a758a4184341390bd59a6f7 | b47fb5884e25ec189ab123c620fc651702774e61 | /assets/migrations/0001_initial.py | 4c5e27db2ad79e50e511e7dbbf26e703ea3e135a | [] | no_license | cs4224485/CMDB | e4782ac81b8c8394a1445c4a9f85777f7859354d | 41710e97fc79ae228f9f654fc5879910e91e1e25 | refs/heads/master | 2020-09-20T18:16:41.479379 | 2019-11-28T02:49:57 | 2019-11-28T02:49:57 | 224,557,148 | 0 | 0 | null | 2019-11-28T02:52:40 | 2019-11-28T02:49:28 | JavaScript | UTF-8 | Python | false | false | 19,877 | py | # Generated by Django 2.1.1 on 2019-06-28 02:36
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Asset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('asset_type', models.CharField(choices=[('server', '服务器'), ('networkdevice', '网络设备'), ('storagedevice', '存储设备'), ('securitydevice', '安全设备'), ('securitydevice', '机房设备'), ('software', '软件资产')], default='server', max_length=64)),
('name', models.CharField(max_length=64, unique=True)),
('sn', models.CharField(max_length=128, unique=True, verbose_name='资产SN号')),
('management_ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='管理IP')),
('trade_date', models.DateField(blank=True, null=True, verbose_name='购买时间')),
('expire_date', models.DateField(blank=True, null=True, verbose_name='过保修期')),
('price', models.FloatField(blank=True, null=True, verbose_name='价格')),
('status', models.SmallIntegerField(choices=[(0, '在线'), (1, '已下线'), (2, '未知'), (3, '故障'), (4, '备用')], default=0)),
('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
('create_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '资产总表',
'verbose_name_plural': '资产总表',
},
),
migrations.CreateModel(
name='BusinessUnit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, unique=True, verbose_name='业务线')),
('memo', models.CharField(blank=True, max_length=64, verbose_name='备注')),
('parent_unit', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='parent_level', to='assets.BusinessUnit')),
],
options={
'verbose_name': '业务线',
'verbose_name_plural': '业务线',
},
),
migrations.CreateModel(
name='Contract',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sn', models.CharField(max_length=128, unique=True, verbose_name='合同号')),
('name', models.CharField(max_length=64, verbose_name='合同名称')),
('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
('price', models.IntegerField(verbose_name='合同金额')),
('detail', models.TextField(blank=True, null=True, verbose_name='合同详细')),
('start_date', models.DateField(blank=True)),
('end_date', models.DateField(blank=True)),
('license_num', models.IntegerField(blank=True, verbose_name='license数量')),
('create_date', models.DateField(auto_now_add=True)),
('update_date', models.DateField(auto_now=True)),
],
options={
'verbose_name': '合同',
'verbose_name_plural': '合同',
},
),
migrations.CreateModel(
name='CPU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cpu_model', models.CharField(blank=True, max_length=128, verbose_name='CPU型号')),
('cpu_count', models.SmallIntegerField(verbose_name='物理cpu个数')),
('cpu_core_count', models.SmallIntegerField(verbose_name='cpu核数')),
('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
('create_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(blank=True, null=True)),
('asset', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
],
options={
'verbose_name': 'CPU部件',
'verbose_name_plural': 'CPU部件',
},
),
migrations.CreateModel(
name='Disk',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sn', models.CharField(blank=True, max_length=128, null=True, verbose_name='SN号')),
('slot', models.CharField(max_length=64, verbose_name='插槽位')),
('model', models.CharField(blank=True, max_length=128, null=True, verbose_name='磁盘型号')),
('capacity', models.FloatField(verbose_name='磁盘容量GB')),
('iface_type', models.CharField(choices=[('SATA', 'SATA'), ('SAS', 'SAS'), ('SCSI', 'SCSI'), ('SSD', 'SSD')], default='SAS', max_length=64, verbose_name='接口类型')),
('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
('create_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(blank=True, null=True)),
('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
],
options={
'verbose_name': '硬盘',
'verbose_name_plural': '硬盘',
},
),
migrations.CreateModel(
name='EventLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='事件名称')),
('event_type', models.SmallIntegerField(choices=[(1, '硬件变更'), (2, '新增配件'), (3, '设备下线'), (4, '设备上线'), (5, '定期维护'), (6, '业务上线\\更新\\变更'), (7, '其它')], verbose_name='事件类型')),
('component', models.CharField(blank=True, max_length=255, null=True, verbose_name='事件子项')),
('detail', models.TextField(verbose_name='事件详情')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='事件时间')),
('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
],
options={
'verbose_name': '事件纪录',
'verbose_name_plural': '事件纪录',
},
),
migrations.CreateModel(
name='IDC',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, unique=True, verbose_name='机房名称')),
('memo', models.CharField(blank=True, max_length=128, null=True, verbose_name='备注')),
],
options={
'verbose_name': '机房',
'verbose_name_plural': '机房',
},
),
migrations.CreateModel(
name='Manufactory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('manufactory', models.CharField(max_length=64, unique=True, verbose_name='厂商名称')),
('support_num', models.CharField(blank=True, max_length=30, verbose_name='支持电话')),
('memo', models.CharField(blank=True, max_length=128, verbose_name='备注')),
],
options={
'verbose_name': '厂商',
'verbose_name_plural': '厂商',
},
),
migrations.CreateModel(
name='NetworkDevice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sub_asset_type', models.SmallIntegerField(choices=[(0, '路由器'), (1, '交换机'), (2, '负载均衡'), (4, 'VPN设备')], default=0, verbose_name='网络设备类型')),
('vlan_ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='VlanIP')),
('intranet_ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='内网IP')),
('model', models.CharField(blank=True, max_length=128, null=True, verbose_name='型号')),
('firmware', models.CharField(blank=True, max_length=128, null=True, verbose_name='固件')),
('port_num', models.SmallIntegerField(blank=True, null=True, verbose_name='端口个数')),
('device_detail', models.TextField(blank=True, null=True, verbose_name='设置详细配置')),
('asset', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
],
options={
'verbose_name': '网络设备',
'verbose_name_plural': '网络设备',
},
),
migrations.CreateModel(
name='NIC',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=64, null=True, verbose_name='网卡名')),
('sn', models.CharField(blank=True, max_length=128, null=True, verbose_name='SN号')),
('model', models.CharField(blank=True, max_length=128, null=True, verbose_name='网卡型号')),
('macaddress', models.CharField(max_length=64, unique=True, verbose_name='MAC')),
('ipaddress', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP')),
('netmask', models.CharField(blank=True, max_length=64, null=True)),
('bonding', models.CharField(blank=True, max_length=64, null=True)),
('memo', models.CharField(blank=True, max_length=128, null=True, verbose_name='备注')),
('create_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(blank=True, null=True)),
('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
],
options={
'verbose_name': '网卡',
'verbose_name_plural': '网卡',
},
),
migrations.CreateModel(
name='RaidAdaptor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sn', models.CharField(blank=True, max_length=128, null=True, verbose_name='SN号')),
('slot', models.CharField(max_length=64, verbose_name='插口')),
('model', models.CharField(blank=True, max_length=64, null=True, verbose_name='型号')),
('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
('create_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(blank=True, null=True)),
('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
],
),
migrations.CreateModel(
name='RAM',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sn', models.CharField(blank=True, max_length=128, null=True, verbose_name='SN号')),
('model', models.CharField(max_length=128, verbose_name='内存型号')),
('slot', models.CharField(max_length=64, verbose_name='插槽')),
('capacity', models.IntegerField(verbose_name='内存大小(MB)')),
('memo', models.CharField(blank=True, max_length=128, null=True, verbose_name='备注')),
('create_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(blank=True, null=True)),
('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
],
options={
'verbose_name': 'RAM',
'verbose_name_plural': 'RAM',
},
),
migrations.CreateModel(
name='SecurityDevice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sub_asset_type', models.SmallIntegerField(choices=[(0, '防火墙'), (1, '入侵检测设备'), (2, '互联网网关'), (4, '运维审计系统')], default=0, verbose_name='服务器类型')),
('asset', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
],
),
migrations.CreateModel(
name='Server',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sub_asset_type', models.SmallIntegerField(choices=[(0, 'PC服务器'), (1, '刀片机'), (2, '小型机')], default=0, verbose_name='服务器类型')),
('created_by', models.CharField(choices=[('auto', 'Auto'), ('manual', 'Manual')], default='auto', max_length=32)),
('model', models.CharField(blank=True, max_length=128, null=True, verbose_name='型号')),
('raid_type', models.CharField(blank=True, max_length=512, null=True, verbose_name='raid类型')),
('os_type', models.CharField(blank=True, max_length=64, null=True, verbose_name='操作系统类型')),
('os_distribution', models.CharField(blank=True, max_length=64, null=True, verbose_name='发型版本')),
('os_release', models.CharField(blank=True, max_length=64, null=True, verbose_name='操作系统版本')),
('asset', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
('hosted_on', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hosted_on_server', to='assets.Server')),
],
options={
'verbose_name': '服务器',
'verbose_name_plural': '服务器',
},
),
migrations.CreateModel(
name='Software',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sub_asset_type', models.SmallIntegerField(choices=[(0, 'OS'), (1, '办公\\开发软件'), (2, '业务软件')], default=0, verbose_name='服务器类型')),
('license_num', models.IntegerField(verbose_name='授权数')),
('version', models.CharField(help_text='eg. CentOS release 6.5 (Final)', max_length=64, unique=True, verbose_name='软件/系统版本')),
],
options={
'verbose_name': '软件/系统',
'verbose_name_plural': '软件/系统',
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32, unique=True, verbose_name='Tag name')),
('create_date', models.DateField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('name', models.CharField(max_length=32, verbose_name='姓名')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
bases=('auth.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AddField(
model_name='tag',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.UserProfile'),
),
migrations.AddField(
model_name='eventlog',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.UserProfile', verbose_name='事件源'),
),
migrations.AddField(
model_name='asset',
name='admin',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.UserProfile', verbose_name='资产管理员'),
),
migrations.AddField(
model_name='asset',
name='business_unit',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.BusinessUnit', verbose_name='所属业务线'),
),
migrations.AddField(
model_name='asset',
name='contract',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.Contract', verbose_name='合同'),
),
migrations.AddField(
model_name='asset',
name='idc',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.IDC', verbose_name='IDC机房'),
),
migrations.AddField(
model_name='asset',
name='manufactory',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.Manufactory', verbose_name='制造商'),
),
migrations.AddField(
model_name='asset',
name='tags',
field=models.ManyToManyField(blank=True, to='assets.Tag'),
),
migrations.AlterUniqueTogether(
name='ram',
unique_together={('asset', 'slot')},
),
migrations.AlterUniqueTogether(
name='raidadaptor',
unique_together={('asset', 'slot')},
),
migrations.AlterUniqueTogether(
name='nic',
unique_together={('asset', 'macaddress')},
),
migrations.AlterUniqueTogether(
name='disk',
unique_together={('asset', 'slot')},
),
]

# ---- source file: Programming Basics/Nested_Loops-LAB/Cinema_Tickets.py (repo: Ivaylo-Atanasov93/The-Learning-Process) ----

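# Nested-loops exercise: reads movie names and ticket sales from stdin until the
# "Finish"/"End" sentinels, then prints each movie's occupancy and the overall
# percentage of student, standard and kid tickets.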
free_seats = 0
ticket_type = ''
sold_seats = 0
student = 0
standard = 0
kids = 0
total_tickets = 0
flag = False
while not flag:
movie = input()
if movie == 'Finish':
break
free_seats = int(input())
while ticket_type != 'End':
ticket_type = input()
if ticket_type == 'student':
student += 1
sold_seats += 1
total_tickets += 1
elif ticket_type == 'standard':
standard += 1
sold_seats += 1
total_tickets += 1
elif ticket_type == 'kid':
kids += 1
sold_seats += 1
total_tickets += 1
elif ticket_type == 'End':
print(f'{movie} - {(sold_seats / free_seats) * 100:.2f}% full.')
elif ticket_type == 'Finish':
print(f'{movie} - {(sold_seats / free_seats) * 100:.2f}% full.')
flag = True
break
if sold_seats == free_seats:
print(f'{movie} - {(sold_seats / free_seats) * 100:.2f}% full.')
break
sold_seats = 0
ticket_type = ''
if flag:
break
print(f'Total tickets: {total_tickets}')
print(f'{(student / total_tickets) * 100:.2f}% student tickets.')
print(f'{(standard / total_tickets) * 100:.2f}% standard tickets.')
print(f'{(kids / total_tickets) * 100:.2f}% kids tickets.')

# ---- source file: backend/apps/area/urls.py (repo: szshysj/Digital_marketing_web) ----

# -*- coding: utf-8 -*-
# @Time : 2019/10/14 23:28
# @Author : 孔祥旭
# @Email : [email protected] / [email protected]
from tornado.web import url
from apps.area.handler import GetAreaHandler
urlpatten = [
# 获取所有可投放地域列表
url('/get/area/', GetAreaHandler)
]
| [
"[email protected]"
] | |
2d322f049fa8f8f91dfb80709a634df823f3de47 | 26f6313772161851b3b28b32a4f8d255499b3974 | /Python/RelativeRanks.py | d749965e66f0cfbeff330cd167f3bbe034cf128d | [] | no_license | here0009/LeetCode | 693e634a3096d929e5c842c5c5b989fa388e0fcd | f96a2273c6831a8035e1adacfa452f73c599ae16 | refs/heads/master | 2023-06-30T19:07:23.645941 | 2021-07-31T03:38:51 | 2021-07-31T03:38:51 | 266,287,834 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | """
Given scores of N athletes, find their relative ranks and the people with the top three highest scores, who will be awarded medals: "Gold Medal", "Silver Medal" and "Bronze Medal".
Example 1:
Input: [5, 4, 3, 2, 1]
Output: ["Gold Medal", "Silver Medal", "Bronze Medal", "4", "5"]
Explanation: The first three athletes got the top three highest scores, so they got "Gold Medal", "Silver Medal" and "Bronze Medal".
For the left two athletes, you just need to output their relative ranks according to their scores.
Note:
N is a positive integer and won't exceed 10,000.
All the scores of athletes are guaranteed to be unique.
"""
class Solution:
def findRelativeRanks(self, scores):
"""
:type nums: List[int]
:rtype: List[str]
"""
medals = ["Gold Medal", "Silver Medal", "Bronze Medal"]
sorted_scores_dict = {}
for order,score in enumerate(sorted(scores, reverse = True)):
if order <= 2:
sorted_scores_dict[score] = medals[order]
else:
sorted_scores_dict[score] = str(order + 1)
res = [sorted_scores_dict[score] for score in scores]
return res
s = Solution()
scores = [5, 4, 3, 2, 1]
print(s.findRelativeRanks(scores))

# ---- source file: titles/101. 对称二叉树.py (repo: lichangg/myleet) ----

# -*- coding:utf-8 -*-
from utils.util_funcs import Tree
# 这个测试用例[1,2,2,None,3,None,3]过不了,本地能过
#
class Solution:
FLAG=True
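    # NOTE: FLAG is a class attribute, so a False result from one call leaks into
    # every later call on this class.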
def isSymmetric(self, root) -> bool:
l = root.left
r = root.right
stack = [l,r]
while stack and all(stack):
nums = []
for i in stack:
nums.append(i.val)
mid=int(len(nums)/2)
if nums[:mid]!=nums[mid:][::-1]:
Solution.FLAG = False
break
temp = []
for j in stack:
if j:
temp.append(j.left)
temp.append(j.right)
stack = temp
return Solution.FLAG
# Second attempt: level-order traversal, checking that each level reads the same forwards and backwards
class Solution:
def isSymmetric(self, root) -> bool:
def is_symmetric(nums):
return nums == nums[::-1]
stack = [root]
while stack:
res = []
temp = []
for i in stack:
if i:
res.append(i.val)
temp.append(i.left)
temp.append(i.right)
else:res.append(None)
flag = is_symmetric(res)
if not flag:
return False
stack = temp
return True
t=Tree()
[t.add(i)for i in [1,2,2,None,3,None,3]]
a=Solution().isSymmetric(t.root)
print(a)

# ---- source file: samples/basic/crud/models/cisco-ios-xr/Cisco-IOS-XR-mpls-ldp-oper/nc-read-xr-mpls-ldp-oper-10-ydk.py (repo: decolnz/ydk-py-samples, license: Apache-2.0) ----

#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Read all data for model Cisco-IOS-XR-mpls-ldp-oper.
usage: nc-read-xr-mpls-ldp-oper-10-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_mpls_ldp_oper \
as xr_mpls_ldp_oper
import logging
def process_mpls_ldp(mpls_ldp):
"""Process data in mpls_ldp object."""
pass
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
# create CRUD service
crud = CRUDService()
mpls_ldp = xr_mpls_ldp_oper.MplsLdp() # create object
# read data from NETCONF device
# mpls_ldp = crud.read(provider, mpls_ldp)
process_mpls_ldp(mpls_ldp) # process object data
provider.close()
exit()
# End of script

# ---- source file: autoencoder/metrics.py (repo: Elaine0/Anomaly-Detection, license: MIT) ----

from tensorflow import keras
import keras.backend as K
def ssim_metric(dynamic_range):
def ssim(imgs_true, imgs_pred):
return K.mean(tf.image.ssim(imgs_true, imgs_pred, dynamic_range), axis=-1)
return ssim
def mssim_metric(dynamic_range):
def mssim(imgs_true, imgs_pred):
return K.mean(
tf.image.ssim_multiscale(imgs_true, imgs_pred, dynamic_range), axis=-1
)
return mssim
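# A minimal usage sketch (illustrative only; assumes 8-bit images, hence
# dynamic_range=255, and an already-built Keras `model`):
#
#   model.compile(optimizer="adam", loss="mse",
#                 metrics=[ssim_metric(255), mssim_metric(255)])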

# ---- source file: 旋转图像.py (repo: getabear/leetcode) ----

class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
if not matrix:
return
m,n=len(matrix),len(matrix[0])
dp=[[0]*n for _ in range(m)]
for h in range(m):
for x in range(n):
dp[x][m-h-1]=matrix[h][x]
matrix[:]=dp[:]
return
a=Solution()
matrix=[[1,2,3],[4,5,6],[7,8,9]]
a.rotate(matrix)

# ---- source file: mwk_converters/mwk_to_sqlite3.py (repo: afcarl/mw_data_analysis_helpers) ----

import logging, os, sys
import sqlite3
import json
#import mwk
import mworks.data as mwk
def mwk_to_sqlite3(inFile, outFile, blacklist=[]):
m = mwk.MWKFile(inFile)
m.open()
# fix codec
codec = m.codec
codec[0], codec[1], codec[2], codec[3] = ('#codec', '#systemEvent', '#components', '#termination')
revCodec = {}
for k,v in codec.iteritems():
revCodec[v] = k
evs = m.get_events()
# open sqlite3 database
logging.debug("opening sqlite3 database: %s" % outFile)
conn = sqlite3.connect(outFile)
c = conn.cursor()
# # make table to add to data files table
# logging.debug("adding information to db")
# c.execute('''create table datafiles
# (animal text, day text)''')
# make table for new data
# tableName = os.path.splitext(os.path.basename(inFile))[0]
# cmd = "create table %s (code int, time int, value text)" % tableName
# c.execute(cmd)
c.execute('''create table events
(code int, time int, value text)''')
# make table for codec
# codecTableName = "%s_codec" % tableName
# cmd = "create table %s (code int, name text)" % codecTableName
# c.execute(cmd)
c.execute('''create table codec
(code int, name text)''')
# # add information to datafiles table
# animal = tableName.split('_')[0].lower()
# day = tableName.split('_')[1]
# c.execute('''insert into datafiles
# values(?,?)''', (animal, day))
# add codec to database
#codec = m.codec
# cmd = "insert into %s values(?,?)" % codecTableName
for (k,v) in codec.iteritems():
# c.execute(cmd,(k,v))
c.execute('''insert into codec values (?,?)''',(k,v))
# add events to database
logging.debug("adding events to db")
# cmd = "insert into %s values(?,?,?)" % tableName
for e in evs:
if codec[e.code] in blacklist:
continue
# c.execute(cmd, (e.code, e.time, json.dumps(e.value)))
c.execute('''insert into events
values(?,?,?)''', (e.code, e.time, json.dumps(e.value)))
logging.debug("cleaning up")
# close database connection
conn.commit()
c.close()
# close mworks file
m.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
eventsBlacklist = ['#announceCurrentState','#codec', '#systemEvent', '#components', '#termination'] # not implemented
# parse command line arguments
logging.debug("Parsing command line arguments")
if len(sys.argv) == 3:
inFile = sys.argv[1]
outFile = sys.argv[2]
elif len(sys.argv) == 2:
inFile = sys.argv[1]
outFile = '%s.sqlite3' % os.path.splitext(os.path.basename(inFile))[0]
else:
print "Usage: %s input_mwk_file (output_sqlite3_file)" % __file__
sys.exit(1)
# open up and read mwks file
logging.debug("opening and reading mwks file: %s" % inFile)
mwk_to_sqlite3(inFile, outFile, eventsBlacklist)
# exit nicely
    sys.exit(0)

# ---- source file: 20210222_ls/auto_chmod.py (repo: rerejii/pwb_work_2021) ----

import sys
import os
# args = sys.argv
path = '/nas-homes/krlabmember/hayakawa/binary/20210115'
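# re-assert mode 755 on the shared directory every 10 seconds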
while True:
os.chmod(path, 0o755)
    time.sleep(10)

# ---- source file: 140_gui/pyqt_pyside/examples/PyQt5/Chapter13_Running Python Scripts on Android and iOS/demoMultipleSelection.py (repo: syurskyi/Python_Topics) ----

app = android.Android()
app.dialogCreateAlert("Select your food items")
app.dialogSetMultiChoiceItems(['Pizza', 'Burger', 'Hot Dog'])
app.dialogSetPositiveButtonText('Done')
app.dialogShow()
app.dialogGetResponse()
response = app.dialogGetSelectedItems()
print(response)
selectedResult=response[1]
n=len(selectedResult)
print("You have selected following food items: ")
for i in range(0, n):
if selectedResult[i]==0:
print("Pizza")
elif selectedResult[i]==1:
print("Burger")
elif selectedResult[i]==2:
print("Hot Dog")

# ---- source file: .metadata/.plugins/org.eclipse.core.resources/.history/5/c0e3511f98fb001611e7bcc2e467e756 (repo: robotanica/ExcavatorROS) ----

'''
Created on Feb 21, 2017
@author: yutak
'''
import rospy
import sensor_msgs.msg as smsg
import exp_excavator.msg as cmsg
class JoyMsgManager:
def __init__(self):
rospy.init_node('joy_msg_manager', anonymous=True)
self.test = rospy.get_param('~test', False)
self.joy_val_msg = cmsg.JointValues()
self.joy_val_msg.boom = 0.0
self.joy_val_msg.arm = 0.0
self.joy_val_msg.bucket = 0.0
self.joy_val_msg.swing = 0.0
self.sub_spd_com_bucket = rospy.Subscriber('joy_right', smsg.Joy,
self.cb_joy_right)
self.sub_joy_left = rospy.Subscriber('joy_left', smsg.Joy,
self.cb_joy_left)
self.pub_joy_values = rospy.Publisher('joy_values', cmsg.JointValues,
queue_size= 10)
def cb_joy_right(self, joy):
self.joy_val_msg.boom = joy.axes[1]
self.joy_val_msg.bucket = joy.axes[0]
if self.test:
rospy.loginfo('Boom Joystick Value:%f' %self.joy_val_msg.boom)
rospy.loginfo('Bucket Joystick Value:%f'
%self.joy_val_msg.bucket)
self.pub_joy_values.publish(self.joy_val_msg)
def cb_joy_left(self, joy):
self.joy_val_msg.arm = joy.axes[1]
self.joy_val_msg.swing = joy.axes[0]
if self.test:
rospy.loginfo('Arm Joystick Value:%f' %self.joy_val_msg.arm)
rospy.loginfo('Swing Joystick Value:%f'
%self.joy_val_msg.swing)
self.pub_joy_values.publish(self.joy_val_msg)
if __name__ == '__main__':
jm = JoyMsgManager()
try:
rospy.spin()
except rospy.ROSInterruptException:
        pass

# ---- source file: Website_Project/mysite/account/views.py (repo: LeeSungRyul/KD_AI) ----

from django.http import HttpResponse
from django.contrib.auth.hashers import make_password, check_password
from .models import Account
# Create your views here.
def register(request): # 회원가입 페이지를 보여주기 위한 함수
if request.method == "GET":
return render(request, 'register.html')
elif request.method == "POST":
userID = request.POST.get('userID', None) # 딕셔너리형태
userPW = request.POST.get('userPW', None)
re_password = request.POST.get('re_password', None)
userMail = request.POST.get('userMail', None)
userPhone = request.POST.get('userPhone', None)
res_data = {}
if not (userID and userPW and re_password and userMail and userPhone):
res_data['error'] = "All values must be entered."
return render(request, 'register.html', res_data)
if userPW != re_password:
# return HttpResponse('비밀번호가 다릅니다.')
res_data['error'] = 'Confirm password does not match.'
return render(request, 'register.html', res_data)
else:
account = Account(userID=userID, userPW=make_password(userPW), userMail=userMail, userPhone=userPhone)
account.save()
return redirect('/login/')
# register를 요청받으면 register.html 로 응답. return render(request, 'register.html')
# res_data: html 파일에서 {{error}}와 맵핑되어 처리. 즉, if문에서 걸리면 뒤의 문자열이 출력
def login(request):
response_data = {}
if request.method == "GET":
return render(request, 'login.html')
elif request.method == "POST":
if '_login' in request.POST:
login_userID = request.POST.get('userID', None)
login_userPW = request.POST.get('userPW', None)
# 아이디와 PW 중 어느 하나라도 입력되지 않은 경우
if not (login_userID and login_userPW):
response_data['error'] = "All values must be entered."
else:
account = Account.objects.get(userID=login_userID)
# db에서 꺼내는 명령. Post로 받아온 userID로 , db의 userID을 꺼내온다.
if check_password(login_userPW, account.userPW):
request.session['account'] = account.userID
# 세션도 딕셔너리 변수 사용과 똑같이 사용하면 된다.
# 세션 account라는 key에 방금 로그인한 id를 저장한것.
return redirect('/') # 로그인 된 홈 화면 이동
else:
response_data['error'] = "Invalid username or password."
return render(request, 'login.html', response_data)
elif '_register' in request.POST:
return redirect('/login/register/')
| [
"[email protected]"
] | |
f8720cb40162973a04d2461826c550fb6a66e68e | 9672e0b45f72261c069aa8140a01e861b8f8db45 | /query/migrations/0005_auto_20150526_1736.py | 970fbb78131ebda45f4baae6af990612482e6cf4 | [] | no_license | KeleiAzz/SCRC_server | 40882c0d5804b0488dd31f4d4db353616d318e48 | 669d45e4d5059cfc766a2a0852d23522d2af7d84 | refs/heads/master | 2020-04-06T10:18:40.817298 | 2016-12-04T06:07:53 | 2016-12-04T06:07:53 | 35,839,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,782 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('query', '0004_evidence'),
]
operations = [
migrations.AlterField(
model_name='evidence',
name='h1',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h10',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h11',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h12',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h13',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h14',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h15',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h16',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h17',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h18',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h19',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h2',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h20',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h21',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h22',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h23',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h3',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h4',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h5',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h6',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h7',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h8',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h9',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
]
| [
"[email protected]"
] | |
0d5d76ef5b074b1347fd493018487bfa5c02aa7b | 0019ea5621577ab9a9a694e3ef91d913e981a28e | /dataset_builder/feature_extractor/test_sub2vec_model_creator.py | ee9c0388664fcada20f99ab062229fd46de862ec | [] | no_license | sungc1/fake-news-framework_Py3 | 676710b3bf7b8feb4c237ffed7d1d280f4967890 | e3552b5bc2a30dbd52ad893ce8dd29aa2242f864 | refs/heads/main | 2023-01-19T23:42:13.294446 | 2020-12-01T18:38:31 | 2020-12-01T18:38:31 | 428,178,049 | 1 | 0 | null | 2021-11-15T08:18:23 | 2021-11-15T08:18:23 | null | UTF-8 | Python | false | false | 9,808 | py | import random
from unittest import TestCase
import networkx as nx
import pandas as pd
import numpy as np
from DB.schema_definition import DB, AuthorConnection, Author, Post
from dataset_builder.feature_extractor.sub2vec_model_creator import Sub2VecModelCreator
from dataset_builder.sub2vec_feature_generator import Sub2VecFeatureGenerator
class TestSub2VecModelCreator(TestCase):
@classmethod
def setUpClass(cls):
""" get_some_resource() is slow, to avoid calling it for each test use setUpClass()
and store the result as class variable
"""
super(TestSub2VecModelCreator, cls).setUpClass()
cls._db = DB()
cls._db.setUp()
cls.sub2vec_model_creator = Sub2VecModelCreator(cls._db)
cls.sub2vec_feature_generator = Sub2VecFeatureGenerator(cls._db, **{'authors': [], 'posts': {}})
edges = [(0, 4), (2, 0), (1, 3), (3, 1), (0, 1), (1, 2), (4, 0), (4, 3), (2, 3), (3, 0)]
cls.connected_undirected_graph = cls.create_undirected_graph(5, edges, 'connected_undirected_graph')
cls.unconnected_directed_graph = cls.connected_directed_graph(7, edges, 'unconnected_directed_graph')
cls.connected_directed_graph = cls.connected_directed_graph(5, edges, 'connected_directed_graph')
cls.unconnected_undirected_graph = cls.create_undirected_graph(7, edges, 'unconnected_undirected_graph')
cls.add_graph_to_db(cls.connected_undirected_graph)
cls.add_graph_to_db(cls.unconnected_directed_graph)
cls.add_graph_to_db(cls.connected_directed_graph)
cls.add_graph_to_db(cls.unconnected_undirected_graph)
@classmethod
def add_graph_to_db(cls, graph):
post = Post(post_id=str(graph.graph['name']), domain='flickr', post_osn_id=str(graph.graph['name']))
post.post_type = 'labels'
author_connections = []
for edge in graph.edges():
author_connections.append(AuthorConnection(source_author_guid=edge[0], destination_author_guid=edge[1],
connection_type=graph.graph['name']))
authors = []
for node in graph.nodes():
authors.append(Author(name=str(node), domain=str(graph.graph['name']), author_guid=str(node)))
cls._db.addPosts([post])
cls._db.addPosts(author_connections)
cls._db.addPosts(authors)
@classmethod
def create_undirected_graph(cls, nodes_count, edges, graph_name):
graph = nx.Graph()
return cls.build_graph(edges, graph, graph_name, nodes_count)
@classmethod
def connected_directed_graph(cls, nodes_count, edges, graph_name):
graph = nx.DiGraph()
return cls.build_graph(edges, graph, graph_name, nodes_count)
@classmethod
def build_graph(cls, edges, graph, graph_name, nodes_count):
graph.add_nodes_from(range(nodes_count))
graph.add_edges_from(edges)
# nx.set_node_attributes(graph, {}, 'label')
nx.set_node_attributes(graph, values={}, name='label')
graph.graph['name'] = graph_name
return graph
def setUp(self):
random.seed(900)
def assertArrayEquals(self, actual_vector, expected_vector):
for actual_value, expected_value in zip(actual_vector, expected_vector):
self.assertAlmostEqual(actual_value, expected_value, places=7)
def test_generate_structural_embedding_for_connected_undirected_graph(self):
args = {'dimensions': 128,
'window': 2,
'walkLength': 1000,
'iterations': 20,
'alpha': 0.5,
'dm': 1,
'wl_iterations': 2,
'randomWalkCount': 10}
embeddings = self.sub2vec_model_creator.graph_structural_embedding([self.connected_undirected_graph], **args)
self.assertEqual(len(embeddings), 1)
self.assertEqual(len(embeddings[0]), 128)
actual_vector = np.array((embeddings[0]))
self.assertTrue(any(actual_vector))
def test_generate_structural_embedding_for_unconnected_undirected_graph(self):
args = {'dimensions': 138,
'window': 2,
'walkLength': 100,
'iterations': 20,
'alpha': 0.5,
'dm': 1,
'randomWalkCount': 10}
embeddings = self.sub2vec_model_creator.graph_structural_embedding([self.unconnected_undirected_graph], **args)
self.assertEqual(len(embeddings), 1)
self.assertEqual(len(embeddings[0]), 138)
actual_vector = np.array((embeddings[0]))
self.assertTrue(any(actual_vector))
def test_generate_structural_embedding_for_connected_directed_graph(self):
args = {'dimensions': 138,
'window': 2,
'walkLength': 30,
'iterations': 20,
'alpha': 0.5,
'dm': 1,
'randomWalkCount': 10}
embeddings = self.sub2vec_model_creator.graph_structural_embedding([self.connected_directed_graph], **args)
self.assertEqual(len(embeddings), 1)
self.assertEqual(len(embeddings[0]), 138)
actual_vector = np.array((embeddings[0]))
self.assertTrue(any(actual_vector))
def test_generate_structural_embedding_for_unconnected_directed_graph(self):
args = {'dimensions': 138,
'window': 2,
'walkLength': 40,
'iterations': 20,
'alpha': 0.5,
'dm': 1,
'randomWalkCount': 10}
embeddings = self.sub2vec_model_creator.graph_structural_embedding([self.unconnected_directed_graph], **args)
self.assertEqual(len(embeddings), 1)
self.assertEqual(len(embeddings[0]), 138)
actual_vector = np.array((embeddings[0]))
self.assertTrue(any(actual_vector))
def test_generate_structural_embedding_for_4_graphs(self):
args = {'dimensions': 118,
'window': 2,
'walkLength': 40,
'iterations': 20,
'alpha': 0.5,
'dm': 1,
'randomWalkCount': 10}
graphs = [self.unconnected_directed_graph, self.connected_undirected_graph,
self.unconnected_undirected_graph, self.connected_directed_graph]
embeddings = self.sub2vec_model_creator.graph_structural_embedding(graphs, **args)
self.assertEqual(len(embeddings), 4)
self.assertEqual(len(embeddings[0]), 118)
self.assertEqual(len(embeddings[1]), 118)
self.assertEqual(len(embeddings[2]), 118)
self.assertEqual(len(embeddings[3]), 118)
self.assertTrue(any(np.array((embeddings[0]))))
self.assertTrue(any(np.array((embeddings[1]))))
self.assertTrue(any(np.array((embeddings[2]))))
self.assertTrue(any(np.array((embeddings[3]))))
def test_generate_author_features_from_sub2vec(self):
dimensions = 118
args = {'dimensions': dimensions,
'window': 2,
'walkLength': 40,
'iterations': 20,
'alpha': 0.5,
'dm': 1,
'randomWalkCount': 10}
graphs = [self.unconnected_directed_graph, self.connected_undirected_graph,
self.unconnected_undirected_graph, self.connected_directed_graph]
embeddings = self.sub2vec_model_creator.graph_structural_embedding(graphs, **args)
authors_features = self.sub2vec_model_creator.convert_embedding_to_author_features(graphs, embeddings)
self.assertEqual(len(authors_features), 4 * dimensions)
for graph, embedding in zip(graphs, embeddings):
actual = [f.attribute_value for f in authors_features if f.author_guid == graph.graph['name']]
self.assertArrayEquals(actual, embedding)
def test_load_graphs(self):
graphs = self.sub2vec_model_creator.load_graphs()
expected_graphs = [self.unconnected_directed_graph, self.connected_undirected_graph,
self.unconnected_undirected_graph, self.connected_directed_graph]
expected_graph_map = {expected_graph.graph['name']: expected_graph for expected_graph in expected_graphs}
for actual_graph in graphs:
expected_graph = expected_graph_map[actual_graph.graph['name']]
self.assertNodes(actual_graph, expected_graph)
self.assertEdges(actual_graph, expected_graph)
pass
def test_execute(self):
graphs = self.sub2vec_model_creator.load_graphs()
self.sub2vec_model_creator.execute()
embedding_table_name = self.sub2vec_model_creator._table_name
df = pd.read_sql_table(embedding_table_name, self._db.engine)
self.assertTupleEqual(df.shape, (len(graphs), self.sub2vec_model_creator._num_of_dimensions + 1))
pass
def test_sub2vec_feature_generator(self):
self.sub2vec_model_creator.execute()
self.sub2vec_feature_generator.execute()
graphs = [self.unconnected_directed_graph, self.connected_undirected_graph,
self.unconnected_undirected_graph, self.connected_directed_graph]
for graph in graphs:
actual_dimensions_count = len(self._db.get_author_features_by_author_guid(graph.graph['name']))
self.assertEqual(actual_dimensions_count, self.sub2vec_model_creator._num_of_dimensions)
def assertEdges(self, actual_graph, expected_graph):
edges = [(int(v), int(u)) for v, u in actual_graph.edges()]
self.assertListEqual(list(sorted(expected_graph.edges())), list(sorted(edges)))
def assertNodes(self, actual_graph, expected_graph):
self.assertListEqual(list(expected_graph.nodes()), list(sorted(map(int, actual_graph.nodes()))))

# ---- source file: ucsmsdk/mometa/storage/StorageSasPort.py (repo: psterdale/ucsmsdk, license: Apache-2.0) ----

from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class StorageSasPortConsts:
LC_ALLOCATED = "allocated"
LC_AVAILABLE = "available"
LC_DEALLOCATED = "deallocated"
LC_REPURPOSED = "repurposed"
LINK_SPEED_1_5_GBPS = "1-5-gbps"
LINK_SPEED_12_GBPS = "12-gbps"
LINK_SPEED_3_GBPS = "3-gbps"
LINK_SPEED_6_GBPS = "6-gbps"
LINK_SPEED_DISABLED = "disabled"
LINK_SPEED_DOWN = "down"
LINK_SPEED_HOST_POWER_OFF = "host-power-off"
LINK_SPEED_UNKNOWN = "unknown"
LINK_SPEED_UNSUPPORTED_DEVICE = "unsupported-device"
class StorageSasPort(ManagedObject):
"""This is StorageSasPort class."""
consts = StorageSasPortConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("StorageSasPort", "storageSasPort", "sas-port-[id]", VersionMeta.Version312b, "InputOutput", 0x3f, [], ["read-only"], [u'storageEnclosureLocalDiskConfig', u'storageLocalDisk'], [], ["Get"])
prop_meta = {
"address": MoPropertyMeta("address", "address", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version312b, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"endpoint": MoPropertyMeta("endpoint", "endpoint", "uint", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version312b, MoPropertyMeta.NAMING, 0x8, None, None, None, [], ["0-4294967295"]),
"lc": MoPropertyMeta("lc", "lc", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["allocated", "available", "deallocated", "repurposed"], []),
"link_descr": MoPropertyMeta("link_descr", "linkDescr", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"link_speed": MoPropertyMeta("link_speed", "linkSpeed", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["1-5-gbps", "12-gbps", "3-gbps", "6-gbps", "disabled", "down", "host-power-off", "unknown", "unsupported-device"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version312b, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"address": "address",
"childAction": "child_action",
"dn": "dn",
"endpoint": "endpoint",
"id": "id",
"lc": "lc",
"linkDescr": "link_descr",
"linkSpeed": "link_speed",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.address = None
self.child_action = None
self.endpoint = None
self.lc = None
self.link_descr = None
self.link_speed = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "StorageSasPort", parent_mo_or_dn, **kwargs)

# ---- source file: problems/greedy/Solution621.py (repo: akaliutau/cs-problems-python, license: MIT) ----

where each letter represents a different task. Tasks could be done in any
order. Each task is done in one unit of time. For each unit of time, the CPU
could complete either one task or just be idle.
However, there is a non-negative integer n that represents the cooldown
period between two same tasks (the same letter in the array), that is that
there must be at least n units of time between any two same tasks.
Return the least number of units of times that the CPU will take to finish
all the given tasks
IDEA:
The total number of CPU intervals we need consists of busy and idle slots.
Number of busy slots is defined by the number of tasks to execute:
len(tasks). The problem is to compute a number of idle slots.
Maximum possible number of idle slots is defined by the frequency of the most
frequent task: idle_time <= (f_max - 1) * n.
Example:
ABCA, cooling=2
|AAAA|B |C |
cooling
just calculate #empty + tot_len of letters
"""
class Solution621:
pass
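
# The class body above is empty in the source; below is a minimal sketch of the
# counting idea described in the docstring (the function name and structure are
# my own, not from the original repository):
def least_interval(tasks, n):
    from collections import Counter
    freq = Counter(tasks)
    f_max = max(freq.values())                            # highest task frequency
    n_max = sum(1 for f in freq.values() if f == f_max)   # tasks sharing f_max
    # (f_max - 1) cooling frames of width n + 1, plus the final row of the most
    # frequent tasks; the plain task count is a lower bound when no idling is needed.
    return max(len(tasks), (f_max - 1) * (n + 1) + n_max)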

# ---- source file: www/manage.py (repo: Ggzzhh/LearnPython) ----

import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "learning_log.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)

# ---- source file: t000766_2.py (repo: feiyanshiren/myAcm) ----

time1 = time.time()
h01 = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
h1 = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
h2 = ["11", "22", "33", "44",
"55", "66", "77", "88", "99"]
hx3 = ["1x1", "2x2", "3x3", "4x4", "5x5",
"6x6", "7x7", "8x8", "9x9"]
hx4 = ["1xx1", "2xx2", "3xx3", "4xx4", "5xx5",
"6xx6", "7xx7", "8xx8", "9xx9"]
hx5 = ["1xyx1", "2xyx2", "3xyx3", "4xyx4", "5xyx5",
"6xyx6", "7xyx7", "8xyx8", "9xyx9"]
hx6 = ["1xyyx1", "2xyyx2", "3xyyx3", "4xyyx4", "5xyyx5",
"6xyyx6", "7xyyx7", "8xyyx8", "9xyyx9"]
h3 = []
h4 = []
h5 = []
h6 = []
hy5 = []
hy6 = []
for hx3_in in hx3:
for h in h01:
s = hx3_in.replace("x", h)
h3.append(s)
for hx4_in in hx4:
for h in h01:
s = hx4_in.replace("x", h)
h4.append(s)
for hx5_in in hx5:
for h in h01:
s = hx5_in.replace("x", h)
hy5.append(s)
for hx6_in in hx6:
for h in h01:
s = hx6_in.replace("x", h)
hy6.append(s)
for hy5_in in hy5:
for h in h01:
s = hy5_in.replace("y", h)
h5.append(s)
for hy6_in in hy6:
for h in h01:
s = hy6_in.replace("y", h)
h6.append(s)
h = h1 + h2 + h3 + h4 + h5 + h6
hh = []
for i in h:
d = str(int(i) ** 2)
k = str(int(i) ** 3)
dd = d[::-1]
kk = k[::-1]
if d == dd and k == kk:
hh.append(i)
hhh = []
ss = ""
k = 0
for h in hh:
if k == 5:
hhh.append(ss.strip())
ss = h + " "
k = 1
else:
ss = ss + h + " "
k = k + 1
hhh.append(ss.strip())
for i in hhh:
print(i)
print(time.time() - time1)

# ---- source file: odin/utils/shape_calculation.py (repo: SmartArduino/odin, license: MIT) ----

from math import ceil
import numpy as np
# ===========================================================================
# Shape calculation for Pooling
# Contain code from theano: theano/tensor/signal/pool.py
# Copyright (c) 2008--2016, Theano Development Team
# ===========================================================================
def get_pool_output_shape(imgshape, ws, ignore_border=False,
strides=None, pad=None):
"""
Parameters
----------
imgshape : tuple, list, or similar of integer or scalar Theano variable
order: (samples, pool_dim1, pool_dim2, pool_dim3, ..., input_depth)
(i.e tensorflow-NHWC format)
ws : list or tuple of N ints
Downsample factor over rows and column.
ws indicates the pool region size.
ignore_border : bool
If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True).
strides : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if pad is None.
"""
# convert tensorflow shape to theano shape
imgshape = (imgshape[0], imgshape[-1]) + tuple(imgshape[1:-1])
ndim = len(ws)
# check valid pad (list or tuple of int)
if isinstance(pad, str):
if 'valid' in pad.lower():
pad = (0,) * ndim
elif 'same' in pad.lower():
out_shape = tuple([int(ceil(float(i) / float(j)))
for i, j in zip(imgshape[-ndim:], strides)])
return (imgshape[0],) + imgshape[2:-ndim] + out_shape + (imgshape[1],)
def compute_out(v, downsample, stride):
if ignore_border:
if downsample == stride:
return v // stride
else:
out = (v - downsample) // stride + 1
return np.maximum(out, 0)
else:
if stride >= downsample:
return (v - 1) // stride + 1
else:
return max(0, (v - 1 - downsample + stride) // stride) + 1
# ====== check input arguments ====== #
if len(imgshape) < ndim:
raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
if strides is None:
strides = ws
if pad is None:
pad = (0,) * ndim
patch_shape = tuple(imgshape[-ndim + i] + pad[i] * 2
for i in range(ndim))
out_shape = [compute_out(patch_shape[i], ws[i], strides[i])
for i in range(ndim)]
rval = tuple(imgshape[:-ndim]) + tuple(out_shape)
# convert theano shape to tensorflow shape
rval = (rval[0],) + rval[2:] + (rval[1],)
return rval
# ===========================================================================
# Shape calculation for Convolution
# Contain code from theano: theano/tensor/nnet/abstract_conv.py
# Copyright (c) 2008--2016, Theano Development Team
# ===========================================================================
def __get_conv_shape_1axis(image_shape, kernel_shape, border_mode,
subsample, dilation=1):
if None in [image_shape, kernel_shape, border_mode,
subsample, dilation]:
return None
# Implicit dilated kernel shape
dil_kernel_shape = (kernel_shape - 1) * dilation + 1
if isinstance(border_mode, str):
border_mode = border_mode.lower()
if border_mode == "half" or border_mode == "same":
pad = dil_kernel_shape // 2
elif border_mode == "full":
pad = dil_kernel_shape - 1
elif border_mode == "valid":
pad = 0
else:
pad = border_mode
if pad < 0:
raise ValueError("border_mode must be >= 0")
# In case of symbolic shape, we want to build the smallest graph
# (image_shape + 2 * pad - dil_kernel_shape) // subsample + 1
if pad == 0:
out_shp = (image_shape - dil_kernel_shape)
else:
out_shp = (image_shape + 2 * pad - dil_kernel_shape)
if subsample != 1:
out_shp = out_shp // subsample
out_shp = out_shp + 1
# ====== get exact same border_mode for theano ====== #
if (border_mode == 'half' or border_mode == 'same') and \
kernel_shape % 2 == 0:
out_shp = (image_shape + subsample - 1) // subsample
return out_shp
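# Worked single-axis example (illustrative, values assumed): with image size
# 28, an even kernel of 4, 'same' padding and stride 2, the final branch above
# reproduces the TensorFlow SAME rule, i.e. ceil(28 / 2):
# >>> __get_conv_shape_1axis(28, 4, 'same', 2)
# 14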
def get_conv_output_shape(image_shape, kernel_shape,
border_mode, subsample,
filter_dilation=None):
"""
    This function computes the output shape of a convolution operation.
original code: abstract_conv.py (theano)
Parameters
----------
image_shape: tuple of int (symbolic or numeric) corresponding to the input
order: (samples, conv_dim1, conv_dim2, conv_dim3, ..., input_depth)
(i.e tensorflow-NHWC format)
kernel_shape: tuple of int (symbolic or numeric) corresponding to the
order: (kernel_dim1, kernel_dim2, kernel_dim3, ..., input_depth, out_depth)
(i.e tensorflow-NHWC format)
    border_mode: string, int (symbolic or numeric) or tuple of int (symbolic
        or numeric). If it is a string, it must be 'valid', 'half' or 'full'.
        If it is a tuple, its two (or three) elements respectively correspond
        to the padding on the height and width (and possibly depth) axes.
    subsample: tuple of int (symbolic or numeric). Its two (or three) elements
        respectively correspond to the subsampling on the height and width (and
        possibly depth) axes.
    filter_dilation: tuple of int (symbolic or numeric). Its two elements
        correspond respectively to the dilation on the height and width axes.
Returns
-------
    output_shape: tuple of int corresponding to the output image shape. Its
        four elements correspond respectively to: batch size, height and width
        of the image, and number of output channels (i.e. tensorflow-NHWC
        format). None where undefined.
"""
# ====== convert tensorflow shape to theano shape ====== #
image_shape = (image_shape[0], image_shape[-1]) + tuple(image_shape[1:-1])
kernel_shape = (kernel_shape[-1], kernel_shape[-2]) + tuple(kernel_shape[:-2])
# ====== infer shape ====== #
bsize, imshp = image_shape[0], image_shape[2:]
nkern, kshp = kernel_shape[0], kernel_shape[2:]
if filter_dilation is None:
filter_dilation = np.ones(len(subsample), dtype='int')
if isinstance(border_mode, tuple):
out_shp = tuple(__get_conv_shape_1axis(
imshp[i], kshp[i], border_mode[i],
subsample[i], filter_dilation[i]) for i in range(len(subsample)))
else:
out_shp = tuple(__get_conv_shape_1axis(
imshp[i], kshp[i], border_mode,
subsample[i], filter_dilation[i]) for i in range(len(subsample)))
# ====== convert theano to tensorflow shape ====== #
return (bsize, ) + out_shp + (nkern,)
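# Illustrative usage (a sketch with assumed example shapes, not from the
# source): a 'valid' 5x5 convolution with 64 filters over NHWC input
# (batch 32, 28x28 RGB images); each spatial axis shrinks to 28 - 5 + 1 = 24:
# >>> get_conv_output_shape((32, 28, 28, 3), (5, 5, 3, 64),
# ...                       border_mode='valid', subsample=(1, 1))
# (32, 24, 24, 64)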
| [
"[email protected]"
] | |
a3cee10d2c3fa7bcdffc20880585935069d651fc | 4910c0f3d03935fc8ee03f1e9dc20dfdb2c7c04b | /Codigos estudiantes por lenguaje/PY/Bryann Valderrama/Algoritmos de Busqueda/DifferencePairSearch.py | 6c885a6c2d463ae002f1c7a54ec826b5b9e9f0a1 | [] | no_license | roca12/gpccodes | ab15eeedc0cadc0735651262887b44f1c2e65b93 | aa034a3014c6fb879ec5392c51f9714bdc5b50c2 | refs/heads/master | 2023-02-01T13:49:27.563662 | 2023-01-19T22:50:58 | 2023-01-19T22:50:58 | 270,723,328 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | '''Dado un arreglo y un numero n, buscar si existe un par cuya
diferencia es n.
- Complejidad Tiempo: O (n logn)
'''
from sys import stdin, stdout
rl = stdin.readline
wr = stdout.write
def findPair(arr, n):
    size = len(arr)
    i, j = 0, 1
    found = False
    while i < size and j < size:
        if i != j and arr[j] - arr[i] == n:
            wr(f'Pair found: {arr[i]} - {arr[j]}\n')
            found = True
            # return True  # uncomment to stop at the first pair
            i += 1  # keep scanning to report every pair
            j += 1
        elif arr[j] - arr[i] < n:
            j += 1
        else:
            i += 1
    if not found:
        wr('Pair not found\n')
    return found
arr = list(map(int, rl().split())) # 1 2 3 4 5 6 7
n = int(rl()) # 5
findPair(arr, n) # 1 -6 | 2 - 7
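# Illustrative note (not part of the original file): the two-pointer scan
# assumes ascending input; an unsorted array would be sorted first, e.g.:
# findPair(sorted(arr), n)
# For the sample input above ("1 2 3 4 5 6 7" and n = 5) the expected output
# is "Pair found: 1 - 6" followed by "Pair found: 2 - 7".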
| [
"[email protected]"
] |