Dataset schema (auto-generated column statistics, one row per source file):

| column | dtype | range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |
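The rows below follow this schema. As a quick orientation, a minimal sketch of loading such a dump with the `datasets` library; the dataset identifier here is a placeholder, not the real name of this export:

# Sketch only: "org/python-files-dump" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("org/python-files-dump", split="train", streaming=True)
for row in ds.take(1):
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the source file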
bb76168acd060f61720726e831f2a0a94fe00c53 | 260499100ef43361cbf2815e2c0eb5288755862c | /Intro/07_almostIncreasingSequence.py | 873528d2d7f438a47ea824f35b3e78211ab9d83a | [] | no_license | Cheng0639/CodeFights_Python | c4640a3cfb6be89049fd3d0d04702b06071b4e39 | 6b536b851016510ee79359e33da662de21cb3d3a | refs/heads/master | 2021-05-09T09:54:57.762593 | 2018-07-24T14:53:15 | 2018-07-24T14:53:15 | 109,243,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py |

def almostIncreasingSequence(sequence):
    count_decreasing_sq = 0
    for i in range(len(sequence) - 1):
        if sequence[i + 1] <= sequence[i]:
            count_decreasing_sq += 1
            if (i >= 1) and (sequence[i + 1] <= sequence[i - 1]):
                if (len(sequence) - 2 > i) and (sequence[i + 2] <= sequence[i]):
                    count_decreasing_sq += 1
        if count_decreasing_sq > 1:
            return False
    return True
print(almostIncreasingSequence([1, 3, 2, 1]) == False)
print(almostIncreasingSequence([1, 3, 2]) == True)
print(almostIncreasingSequence([1, 2, 1, 2]) == False)
print(almostIncreasingSequence([1, 4, 10, 4, 2]) == False)
| [
"[email protected]"
] | |
8c754a532e50d2046cfeb41f5c5df7af538ee122 | f854ef28002a3931a8d8b8d0b9cc691b8a449db3 | /home-assistant/custom_components/browser_mod/binary_sensor.py | 16333806a3baa1bbffe7e5797049573fb21ccf12 | [
"MIT"
] | permissive | Burningstone91/smart-home-setup | 030cdaa13d05fb19a82b28ea455614d3276522ab | c2f34cc8b8243bc6ce620b3f03e3e44ff28150ca | refs/heads/master | 2023-02-23T06:25:04.476657 | 2022-02-26T16:05:02 | 2022-02-26T16:05:02 | 239,319,680 | 421 | 36 | MIT | 2023-02-08T01:16:54 | 2020-02-09T14:39:06 | JavaScript | UTF-8 | Python | false | false | 1,646 | py |

from datetime import datetime

from homeassistant.const import (
    STATE_UNAVAILABLE,
    ATTR_BATTERY_CHARGING,
    ATTR_BATTERY_LEVEL,
    STATE_ON,
    STATE_OFF,
)
from homeassistant.components.binary_sensor import DEVICE_CLASS_MOTION

from .helpers import setup_platform, BrowserModEntity

PLATFORM = "binary_sensor"


async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    return setup_platform(hass, config, async_add_devices, PLATFORM, BrowserModSensor)


async def async_setup_entry(hass, config_entry, async_add_entities):
    await async_setup_platform(hass, {}, async_add_entities)


class BrowserModSensor(BrowserModEntity):
    domain = PLATFORM

    def __init__(self, hass, connection, deviceID, alias=None):
        super().__init__(hass, connection, deviceID, alias)
        self.last_seen = None

    def updated(self):
        self.last_seen = datetime.now()
        self.schedule_update_ha_state()

    @property
    def state(self):
        if not self.connection.connection:
            return STATE_UNAVAILABLE
        if self.data.get("motion", False):
            return STATE_ON
        return STATE_OFF

    @property
    def is_on(self):
        return not self.data.get("motion", False)

    @property
    def device_class(self):
        return DEVICE_CLASS_MOTION

    @property
    def device_state_attributes(self):
        return {
            "type": "browser_mod",
            "last_seen": self.last_seen,
            ATTR_BATTERY_LEVEL: self.data.get("battery", None),
            ATTR_BATTERY_CHARGING: self.data.get("charging", None),
            **self.data,
        }
| [
"[email protected]"
] | |
9d338275db0bb2d0043d091a349c50f9ffa5ffa9 | e9c11f173507a06b40523714591cf1c443efcd89 | /autoencode_project/vae.py | aea6bebb4650219215bdaca9cac090ed3b326ee3 | [] | no_license | IanEisenberg/CBMM | 55100773b157981122aa261d70186c42ca04b685 | 6a1e2eda7308b1334187036ef37983b940b5d186 | refs/heads/master | 2021-01-19T07:40:30.735971 | 2017-08-31T01:16:32 | 2017-08-31T01:16:32 | 100,642,436 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,793 | py |
from keras.layers import Input, Dense, Lambda, Layer
from keras.models import Model
from keras import backend as K
from keras import metrics
import numpy as np
from os import path
import pandas as pd
from scipy.stats import norm
import seaborn as sns
from sklearn.preprocessing import scale

# load data
data_loc = path.join('Data', 'behavioral_data.csv')
data_df = pd.read_csv(data_loc, index_col=0)
data = data_df.values
n_held_out = data.shape[0] // 6
data_train = scale(data[n_held_out:, :])
data_held_out = scale(data[:n_held_out, :])

# VAE
batch_size = data_train.shape[0] // 5
original_dim = data_train.shape[1]
latent_dim = 4
intermediate_dim = 50
epochs = 1000
epsilon_std = 1.0

x = Input(batch_shape=(batch_size, original_dim))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_sigma = Dense(latent_dim)(h)


def sampling(args):
    z_mean, z_log_sigma = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_sigma) * epsilon


# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_sigma])`
z = Lambda(sampling)([z_mean, z_log_sigma])

decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)

# end-to-end autoencoder
vae = Model(x, x_decoded_mean)
# encoder, from inputs to latent space
encoder = Model(x, z_mean)
# generator, from latent space to reconstructed inputs
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
generator = Model(decoder_input, _x_decoded_mean)


def vae_loss(x, x_decoded_mean):
    xent_loss = metrics.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
    return xent_loss + kl_loss


vae.compile(optimizer='rmsprop', loss=vae_loss)
out = vae.fit(data_train, data_train,
              shuffle=True,
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(data_held_out, data_held_out))

from matplotlib import pyplot as plt
from itertools import product

x_test_encoded = encoder.predict(data, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1])

n = 8
grid = product(*[norm.ppf(np.linspace(0.05, 0.95, n))
                 for _ in range(latent_dim)])
samples = []
for loc in grid:
    z_sample = np.array([loc])
    x_decoded = generator.predict(z_sample)
    samples.append(x_decoded)
samples = np.vstack(samples)
sns.heatmap(np.corrcoef(samples.T))
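Once trained, new rows can also be drawn by decoding samples from the latent prior; a minimal sketch reusing `generator`, `latent_dim` and `original_dim` from the script above:

# Decode a single standard-normal draw from the latent space into one
# synthetic behavioral-data row; the result has shape (1, original_dim).
z = np.random.normal(size=(1, latent_dim))
synthetic_row = generator.predict(z)
print(synthetic_row.shape)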
| [
"[email protected]"
] | |
3c1cb2eabb7a69bd4c6859d78945c5be3c53996c | 15b71fe940708d3c04581bfb012c8d61705d6108 | /compiler/src/thryft/compiler/parse_exception.py | 18fec9277995932a528ee0f15e5e57fdd1b8d97c | [
"BSD-2-Clause"
] | permissive | adam-singer/thryft | 2f426c1751c02f95c4785f499065b28b853df42d | 26cfd2148fa408aa5da8ac93bbe7b8722a0dfd8e | refs/heads/master | 2021-05-27T00:42:19.358530 | 2014-07-27T19:09:34 | 2014-07-27T19:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py |

#-------------------------------------------------------------------------------
# Copyright (c) 2013, Minor Gordon
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#-------------------------------------------------------------------------------
from thryft.compiler.token import Token
class ParseException(Exception):
    def __init__(self, message, token=None):
        if token is not None:
            assert isinstance(token, Token)
            message = "%s at %s" % (message, repr(token))
        Exception.__init__(self, message)
        self.__token = token

    @property
    def token(self):
        return self.__token
| [
"[email protected]"
] | |
78851915cd48b3fa3a3b346632c40c8be0e4232d | 51f1a5f544fd00c7449edeb28538dd99d4b5a1c2 | /spyderpro/test/insert_scenece_history_data.py | 23acb9e0de056e6062a2453e8fecf4cce4d3df5d | [] | no_license | LianZS/spyderpro | aab6f7f5c88c87f683f6cdacd19629d11da74009 | 5e34873cd13950dd3b5dc6341aad144522af0eae | refs/heads/master | 2020-05-31T10:38:30.143573 | 2020-02-16T02:43:43 | 2020-02-16T02:43:43 | 190,239,345 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,005 | py |

import csv
import os
import pymysql
from threading import Thread, Semaphore

user = 'root'
password = 'lzs87724158'
host = "localhost"
port = 3306
scencefilepath = os.getcwd()
city_file_path = os.getcwd()
database = 'digitalsmart'
db = pymysql.connect(host=host, user=user, password=password, database=database,
                     port=port)
cur = db.cursor()
sql = "select pid,area ,table_id from digitalsmart.tablemanager "
cur.execute(sql)
area_map = dict()
for pid, area, table_id in cur.fetchall():
    area_map[area] = {"pid": pid, "table_id": table_id}
rootpath = os.path.abspath(os.path.curdir)
dirpath = "/Volumes/Tigo/易班项目数据/景区客流数据/"
sql_file = open("./flowdata.sql", 'a+')
for file in os.listdir(dirpath):
    area = file.split(".")[0]
    print(file)
    try:
        id_data = area_map[area]
    except KeyError:
        continue
    pid = id_data['pid']
    table_id = id_data['table_id']
    filepath = dirpath + file
    f = open(filepath, 'r')
    r = csv.reader(f)
    if table_id == 0:
        sql_format = "insert into digitalsmart.historyscenceflow0(pid, ddate, ttime, num) VALUES "
    elif table_id == 1:
        sql_format = "insert into digitalsmart.historyscenceflow1(pid, ddate, ttime, num) VALUES "
    elif table_id == 2:
        sql_format = "insert into digitalsmart.historyscenceflow2 (pid, ddate, ttime, num) VALUES "
    elif table_id == 3:
        sql_format = "insert into digitalsmart.historyscenceflow3 (pid, ddate, ttime, num) VALUES "
    elif table_id == 4:
        sql_format = "insert into digitalsmart.historyscenceflow4 (pid, ddate, ttime, num) VALUES "
    elif table_id == 5:
        sql_format = "insert into digitalsmart.historyscenceflow5 (pid, ddate, ttime, num) VALUES "
    elif table_id == 6:
        sql_format = "insert into digitalsmart.historyscenceflow6 (pid, ddate, ttime, num) VALUES "
    elif table_id == 7:
        sql_format = "insert into digitalsmart.historyscenceflow7 (pid, ddate, ttime, num) VALUES "
    elif table_id == 8:
        sql_format = "insert into digitalsmart.historyscenceflow8 (pid, ddate, ttime, num) VALUES "
    elif table_id == 9:
        sql_format = "insert into digitalsmart.historyscenceflow9 (pid, ddate, ttime, num) VALUES "
    sql_file.write(sql_format)
    item = r.__next__()
    date_time = item[0]  # 2018-04-08 19:05:00
    ddate = int(date_time.split(" ")[0].replace("-", ''))
    ttime = date_time.split(" ")[1]
    num = int(item[1])
    value = "(%d,%d,'%s',%d)" % (pid, ddate, ttime, num)
    sql_file.write(value)
    for item in r:
        try:
            date_time = item[0]  # 2018-04-08 19:05:00
            ddate = int(date_time.split(" ")[0].replace("-", ''))
            ttime = date_time.split(" ")[1]
            num = int(item[1])
        except Exception:
            print(item)
            continue
        value = "(%d,%d,'%s',%d)" % (pid, ddate, ttime, num)
        sql_file.write(",")
        sql_file.write(value)
    break
sql_file.write(";")
"[email protected]"
] | |
ff9fc9c3b9bbd3652a26b7aa1af092f08760aecf | 4a83925d00f26b57db9a77553fbacf30735d1171 | /open/core/betterself/tests/views/test_measurement_views.py | 9bc05ae6117eed20260d67dd63e7475525afa9ea | [
"MIT"
] | permissive | mmangione/open | 581f622a6c2fe465fc7bd5cd0dc43a2a5f098248 | 5163e47ea6ba6160bf12a3ebe18bc76d078ea62c | refs/heads/master | 2021-07-15T09:34:14.355559 | 2020-10-14T09:08:14 | 2020-10-14T09:08:14 | 217,483,795 | 0 | 0 | MIT | 2020-10-14T09:08:16 | 2019-10-25T08:05:58 | Python | UTF-8 | Python | false | false | 945 | py |

from django.contrib.auth import get_user_model
from test_plus import TestCase

from open.core.betterself.constants import BetterSelfResourceConstants
from open.core.betterself.factories import MeasurementFactory
from open.core.betterself.models.measurement import Measurement
from open.core.betterself.tests.mixins.resource_mixin import (
    BetterSelfResourceViewTestCaseMixin,
)

User = get_user_model()

"""
python manage.py test --pattern="*test_measurement_views.py" --keepdb
"""


class TestMeasurementView(BetterSelfResourceViewTestCaseMixin, TestCase):
    url_name = BetterSelfResourceConstants.MEASUREMENTS
    model_class_factory = MeasurementFactory
    model_class = Measurement

    def test_view(self):
        data = self.client_1.get(self.url).data
        self.assertEqual(len(data), 5)

    def test_no_access_view(self):
        """
        Doesn't apply here, measurements are available for all.
        """
        return
| [
"[email protected]"
] | |
919842e59adaa424c81909c1e5e0a91ed3666ca8 | a6bf211d2b31c2d7fdb927924d77ac00f1fb4d5f | /scripts/chart-total-checks-all.py | ad25b40f96661a760cd0c1901cb00d14c3cd0c9a | [
"MIT"
] | permissive | tzor1234/nics-firearm-background-checks | f96770c4d7e4ac6eed16ad2dff94717f51c8611c | 9e752e891124de16c2fd911050f853498c577260 | refs/heads/master | 2023-09-05T21:42:49.096106 | 2021-11-01T19:25:43 | 2021-11-01T19:25:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py |

#!/usr/bin/env python
import sys, os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
import seaborn as sb

sb.set()

checks = (
    pd.read_csv(sys.stdin)
    .assign(
        month_dt = lambda df: pd.to_datetime(df["month"], format = "%Y-%m")
    )
)

totals = checks.groupby("month_dt")["totals"].sum()

ax = totals.plot(kind="area", figsize=(12, 8), color="#000000", alpha=0.5)
ax.figure.set_facecolor("#FFFFFF")
ax.set_title(
    "Monthly NICS Background Check Totals Since Nov. 1998",
    fontsize=24
)
plt.setp(ax.get_yticklabels(), fontsize=12)
ax.yaxis.set_major_formatter(StrMethodFormatter("{x:,.0f}"))
ax.set_xlabel("")
plt.savefig(sys.stdout.buffer)
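Because `totals` is indexed by month, coarser aggregates are one resample away; a sketch, not part of the original chart:

# Optional: aggregate the monthly series to calendar-year totals.
yearly = totals.resample("Y").sum()
print(yearly.tail())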
| [
"[email protected]"
] | |
29800f47bd874f8ef18582517b30b1a222c6d4f7 | aa4aa51465d79e0447cbe22281f0402ca95bdaa2 | /python/project/gibbs.py | 6332c91e98afb6236718f6b588e65561703cc60e | [] | no_license | zuozuo12/usualProject | 2ca06bb7a1ff6f99343f1997053ba8d5a48e00a7 | 335bcef5d76d6cf0c84dd3209176089b3b07fbba | refs/heads/master | 2020-11-27T17:02:33.252884 | 2019-10-22T06:46:32 | 2019-10-22T06:46:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py |

from scipy.stats import norm
import matplotlib.pyplot as plt
import random
import math


def gibbs(N=500, thin=10):
    pi = []
    x = 0
    y = 0
    for i in range(N):
        for j in range(thin):
            x = norm.rvs(loc=y, scale=2, size=1, random_state=None)
            y = norm.rvs(loc=x, scale=3, size=1, random_state=None)
        pi.append(x[0])
    print(pi)
    return pi


pi = gibbs()
plt.hist(pi, 100, normed=1, facecolor='red', alpha=0.7, label='Samples Distribution')
plt.show()
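A trace plot is a quick complement to the histogram for judging whether the chain has mixed; a sketch using the `pi` list computed above:

# Trace plot of the sampled x values: visible drift or slow wandering
# here would suggest the chain has not reached a stationary regime.
plt.plot(pi)
plt.xlabel('iteration (after thinning)')
plt.ylabel('x')
plt.show()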
# import random, math
#
#
# def gibbs(N=50000, thin=1000):
# x = 0
# y = 0
# print
# "Iter x y"
# for i in range(N):
# for j in range(thin):
# x = random.gammavariate(3, 1.0 / (y * y + 4))
# y = random.gauss(1.0 / (x + 1), 1.0 / math.sqrt(2 * x + 2))
# print
# i, x, y
#
#
# gibbs()
| [
"[email protected]"
] | |
9430c9ef1326c8401d3de54c76f50e4741fbcd27 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_083/ch11_2020_03_18_03_05_56_266683.py | daa8a1dbf71d778dd8294f4e1e22a98e871fce67 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py |

import math


def distancia_euclidiana(x1, x2, y1, y2):
    # Parenthesize the exponent: "**1/2" binds as (...**1)/2, not a square root.
    return (((x2 - x1)**2) + ((y2 - y1)**2))**(1/2)


a = 2
b = 3
c = 4
d = 5
e = ((3 - 2)**2) + (5 - 4)**2
raiz = math.sqrt(e)
print(raiz)
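With the precedence fix, the helper reproduces the hand-computed `raiz` on the same points:

# Should print the same value as raiz above: sqrt((3-2)**2 + (5-4)**2).
print(distancia_euclidiana(a, b, c, d))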
"[email protected]"
] | |
327458930aa320ef01199a11641fa16213635e1d | f1fcd165cd8444310ce5d201e481e3982dc28110 | /medium/1901/190127/jang.py | c9913b842429b52e94d07f0ea82989fad994fc08 | [] | no_license | JoosJuliet/algoStudy | 310a71a0fcc8f3c23281544cf3458ed999040176 | 3fc1e850f9d8b9f290f41fddd59ff403fbfffa05 | refs/heads/master | 2020-04-20T19:26:25.485875 | 2019-03-27T22:37:27 | 2019-03-27T22:37:27 | 169,049,593 | 1 | 0 | null | 2019-02-04T08:43:07 | 2019-02-04T08:43:07 | null | UTF-8 | Python | false | false | 592 | py |

n = int(input())
arr = list(map(int, input().split()))
enum_arr = sorted(enumerate(arr, start=1), key=lambda t: t[1])
frm = []
to = []
for i in range(1, len(enum_arr) + 1):
    if enum_arr[i - 1][0] != i:
        frm.append(i)
        to.append(enum_arr[i - 1][0])
# Guard against an already-sorted input, where frm is empty.
cvted = list(range(frm[0], frm[-1] + 1)) if frm else []
if len(cvted) % 2 == 1:
    cvted.pop(len(cvted) // 2)
if len(frm) == 0:
    print("yes")
elif len(frm) == 2 and frm == to[::-1]:
    print("yes")
    print("swap", *frm)
elif frm == to[::-1] and frm == cvted:
    print("yes")
    print("reverse", frm[0], frm[-1])
else:
    print("no")
| [
"[email protected]"
] | |
1711a379bd8678b699bd7719b130703e9f437539 | 55540f3e86f1d5d86ef6b5d295a63518e274efe3 | /toolchain/riscv/MSYS/python/Lib/test/test_venv.py | c8a36a77b3510ec2e05fc87a0bd98207927f2d4d | [
"bzip2-1.0.6",
"LicenseRef-scancode-proprietary-license",
"OpenSSL",
"Python-2.0",
"LicenseRef-scancode-newlib-historical",
"TCL",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | bouffalolab/bl_iot_sdk | bc5eaf036b70f8c65dd389439062b169f8d09daa | b90664de0bd4c1897a9f1f5d9e360a9631d38b34 | refs/heads/master | 2023-08-31T03:38:03.369853 | 2023-08-16T08:50:33 | 2023-08-18T09:13:27 | 307,347,250 | 244 | 101 | Apache-2.0 | 2023-08-28T06:29:02 | 2020-10-26T11:16:30 | C | UTF-8 | Python | false | false | 20,420 | py |

"""
Test harness for the venv module.
Copyright (C) 2011-2012 Vinay Sajip.
Licensed to the PSF under a contributor agreement.
"""
import ensurepip
import os
import os.path
import re
import shutil
import struct
import subprocess
import sys
import tempfile
from test.support import (captured_stdout, captured_stderr, requires_zlib,
                          can_symlink, EnvironmentVarGuard, rmtree)
import threading
import unittest
import venv

try:
    import ctypes
except ImportError:
    ctypes = None

# Platforms that set sys._base_executable can create venvs from within
# another venv, so no need to skip tests that require venv.create().
requireVenvCreate = unittest.skipUnless(
    hasattr(sys, '_base_executable')
    or sys.prefix == sys.base_prefix,
    'cannot run venv.create from within a venv on this platform')
def check_output(cmd, encoding=None):
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         encoding=encoding)
    out, err = p.communicate()
    if p.returncode:
        raise subprocess.CalledProcessError(
            p.returncode, cmd, out, err)
    return out, err
class BaseTest(unittest.TestCase):
    """Base class for venv tests."""
    maxDiff = 80 * 50

    def setUp(self):
        self.env_dir = os.path.realpath(tempfile.mkdtemp())
        if os.name == 'nt':
            self.bindir = 'Scripts'
            self.lib = ('Lib',)
            self.include = 'Include'
        else:
            self.bindir = 'bin'
            self.lib = ('lib', 'python%d.%d' % sys.version_info[:2])
            self.include = 'include'
        executable = getattr(sys, '_base_executable', sys.executable)
        self.exe = os.path.split(executable)[-1]
        if (sys.platform == 'win32'
                and os.path.lexists(executable)
                and not os.path.exists(executable)):
            self.cannot_link_exe = True
        else:
            self.cannot_link_exe = False

    def tearDown(self):
        rmtree(self.env_dir)

    def run_with_capture(self, func, *args, **kwargs):
        with captured_stdout() as output:
            with captured_stderr() as error:
                func(*args, **kwargs)
        return output.getvalue(), error.getvalue()

    def get_env_file(self, *args):
        return os.path.join(self.env_dir, *args)

    def get_text_file_contents(self, *args):
        with open(self.get_env_file(*args), 'r') as f:
            result = f.read()
        return result
class BasicTest(BaseTest):
    """Test venv module functionality."""

    def isdir(self, *args):
        fn = self.get_env_file(*args)
        self.assertTrue(os.path.isdir(fn))

    def test_defaults(self):
        """
        Test the create function with default arguments.
        """
        rmtree(self.env_dir)
        self.run_with_capture(venv.create, self.env_dir)
        self.isdir(self.bindir)
        self.isdir(self.include)
        self.isdir(*self.lib)
        # Issue 21197
        p = self.get_env_file('lib64')
        conditions = ((struct.calcsize('P') == 8) and (os.name == 'posix') and
                      (sys.platform != 'darwin'))
        if conditions:
            self.assertTrue(os.path.islink(p))
        else:
            self.assertFalse(os.path.exists(p))
        data = self.get_text_file_contents('pyvenv.cfg')
        executable = getattr(sys, '_base_executable', sys.executable)
        path = os.path.dirname(executable)
        self.assertIn('home = %s' % path, data)
        fn = self.get_env_file(self.bindir, self.exe)
        if not os.path.exists(fn):  # diagnostics for Windows buildbot failures
            bd = self.get_env_file(self.bindir)
            print('Contents of %r:' % bd)
            print(' %r' % os.listdir(bd))
        self.assertTrue(os.path.exists(fn), 'File %r should exist.' % fn)

    def test_prompt(self):
        env_name = os.path.split(self.env_dir)[1]
        builder = venv.EnvBuilder()
        context = builder.ensure_directories(self.env_dir)
        self.assertEqual(context.prompt, '(%s) ' % env_name)
        builder = venv.EnvBuilder(prompt='My prompt')
        context = builder.ensure_directories(self.env_dir)
        self.assertEqual(context.prompt, '(My prompt) ')

    @requireVenvCreate
    def test_prefixes(self):
        """
        Test that the prefix values are as expected.
        """
        # check a venv's prefixes
        rmtree(self.env_dir)
        self.run_with_capture(venv.create, self.env_dir)
        envpy = os.path.join(self.env_dir, self.bindir, self.exe)
        cmd = [envpy, '-c', None]
        for prefix, expected in (
                ('prefix', self.env_dir),
                ('exec_prefix', self.env_dir),
                ('base_prefix', sys.base_prefix),
                ('base_exec_prefix', sys.base_exec_prefix)):
            cmd[2] = 'import sys; print(sys.%s)' % prefix
            out, err = check_output(cmd)
            self.assertEqual(out.strip(), expected.encode())
    if sys.platform == 'win32':
        ENV_SUBDIRS = (
            ('Scripts',),
            ('Include',),
            ('Lib',),
            ('Lib', 'site-packages'),
        )
    else:
        ENV_SUBDIRS = (
            ('bin',),
            ('include',),
            ('lib',),
            ('lib', 'python%d.%d' % sys.version_info[:2]),
            ('lib', 'python%d.%d' % sys.version_info[:2], 'site-packages'),
        )

    def create_contents(self, paths, filename):
        """
        Create some files in the environment which are unrelated
        to the virtual environment.
        """
        for subdirs in paths:
            d = os.path.join(self.env_dir, *subdirs)
            os.mkdir(d)
            fn = os.path.join(d, filename)
            with open(fn, 'wb') as f:
                f.write(b'Still here?')

    def test_overwrite_existing(self):
        """
        Test creating environment in an existing directory.
        """
        self.create_contents(self.ENV_SUBDIRS, 'foo')
        venv.create(self.env_dir)
        for subdirs in self.ENV_SUBDIRS:
            fn = os.path.join(self.env_dir, *(subdirs + ('foo',)))
            self.assertTrue(os.path.exists(fn))
            with open(fn, 'rb') as f:
                self.assertEqual(f.read(), b'Still here?')
        builder = venv.EnvBuilder(clear=True)
        builder.create(self.env_dir)
        for subdirs in self.ENV_SUBDIRS:
            fn = os.path.join(self.env_dir, *(subdirs + ('foo',)))
            self.assertFalse(os.path.exists(fn))

    def clear_directory(self, path):
        for fn in os.listdir(path):
            fn = os.path.join(path, fn)
            if os.path.islink(fn) or os.path.isfile(fn):
                os.remove(fn)
            elif os.path.isdir(fn):
                rmtree(fn)
    def test_unoverwritable_fails(self):
        # create a file clashing with directories in the env dir
        for paths in self.ENV_SUBDIRS[:3]:
            fn = os.path.join(self.env_dir, *paths)
            with open(fn, 'wb') as f:
                f.write(b'')
            self.assertRaises((ValueError, OSError), venv.create, self.env_dir)
            self.clear_directory(self.env_dir)

    def test_upgrade(self):
        """
        Test upgrading an existing environment directory.
        """
        # See Issue #21643: the loop needs to run twice to ensure
        # that everything works on the upgrade (the first run just creates
        # the venv).
        for upgrade in (False, True):
            builder = venv.EnvBuilder(upgrade=upgrade)
            self.run_with_capture(builder.create, self.env_dir)
            self.isdir(self.bindir)
            self.isdir(self.include)
            self.isdir(*self.lib)
            fn = self.get_env_file(self.bindir, self.exe)
            if not os.path.exists(fn):
                # diagnostics for Windows buildbot failures
                bd = self.get_env_file(self.bindir)
                print('Contents of %r:' % bd)
                print(' %r' % os.listdir(bd))
            self.assertTrue(os.path.exists(fn), 'File %r should exist.' % fn)

    def test_isolation(self):
        """
        Test isolation from system site-packages
        """
        for ssp, s in ((True, 'true'), (False, 'false')):
            builder = venv.EnvBuilder(clear=True, system_site_packages=ssp)
            builder.create(self.env_dir)
            data = self.get_text_file_contents('pyvenv.cfg')
            self.assertIn('include-system-site-packages = %s\n' % s, data)

    @unittest.skipUnless(can_symlink(), 'Needs symlinks')
    def test_symlinking(self):
        """
        Test symlinking works as expected
        """
        for usl in (False, True):
            builder = venv.EnvBuilder(clear=True, symlinks=usl)
            builder.create(self.env_dir)
            fn = self.get_env_file(self.bindir, self.exe)
            # Don't test when False, because e.g. 'python' is always
            # symlinked to 'python3.3' in the env, even when symlinking in
            # general isn't wanted.
            if usl:
                if self.cannot_link_exe:
                    # Symlinking is skipped when our executable is already a
                    # special app symlink
                    self.assertFalse(os.path.islink(fn))
                else:
                    self.assertTrue(os.path.islink(fn))
    # If a venv is created from a source build and that venv is used to
    # run the test, the pyvenv.cfg in the venv created in the test will
    # point to the venv being used to run the test, and we lose the link
    # to the source build - so Python can't initialise properly.
    @requireVenvCreate
    def test_executable(self):
        """
        Test that the sys.executable value is as expected.
        """
        rmtree(self.env_dir)
        self.run_with_capture(venv.create, self.env_dir)
        envpy = os.path.join(os.path.realpath(self.env_dir),
                             self.bindir, self.exe)
        out, err = check_output([envpy, '-c',
                                 'import sys; print(sys.executable)'])
        self.assertEqual(out.strip(), envpy.encode())

    @unittest.skipUnless(can_symlink(), 'Needs symlinks')
    def test_executable_symlinks(self):
        """
        Test that the sys.executable value is as expected.
        """
        rmtree(self.env_dir)
        builder = venv.EnvBuilder(clear=True, symlinks=True)
        builder.create(self.env_dir)
        envpy = os.path.join(os.path.realpath(self.env_dir),
                             self.bindir, self.exe)
        out, err = check_output([envpy, '-c',
                                 'import sys; print(sys.executable)'])
        self.assertEqual(out.strip(), envpy.encode())

    @unittest.skipUnless(os.name == 'nt', 'only relevant on Windows')
    def test_unicode_in_batch_file(self):
        """
        Test handling of Unicode paths
        """
        rmtree(self.env_dir)
        env_dir = os.path.join(os.path.realpath(self.env_dir), 'ϼўТλФЙ')
        builder = venv.EnvBuilder(clear=True)
        builder.create(env_dir)
        activate = os.path.join(env_dir, self.bindir, 'activate.bat')
        envpy = os.path.join(env_dir, self.bindir, self.exe)
        out, err = check_output(
            [activate, '&', self.exe, '-c', 'print(0)'],
            encoding='oem',
        )
        self.assertEqual(out.strip(), '0')

    @requireVenvCreate
    def test_multiprocessing(self):
        """
        Test that the multiprocessing is able to spawn.
        """
        rmtree(self.env_dir)
        self.run_with_capture(venv.create, self.env_dir)
        envpy = os.path.join(os.path.realpath(self.env_dir),
                             self.bindir, self.exe)
        out, err = check_output([envpy, '-c',
                                 'from multiprocessing import Pool; '
                                 'pool = Pool(1); '
                                 'print(pool.apply_async("Python".lower).get(3)); '
                                 'pool.terminate()'])
        self.assertEqual(out.strip(), "python".encode())

    @unittest.skipIf(os.name == 'nt', 'not relevant on Windows')
    def test_deactivate_with_strict_bash_opts(self):
        bash = shutil.which("bash")
        if bash is None:
            self.skipTest("bash required for this test")
        rmtree(self.env_dir)
        builder = venv.EnvBuilder(clear=True)
        builder.create(self.env_dir)
        activate = os.path.join(self.env_dir, self.bindir, "activate")
        test_script = os.path.join(self.env_dir, "test_strict.sh")
        with open(test_script, "w") as f:
            f.write("set -euo pipefail\n"
                    f"source {activate}\n"
                    "deactivate\n")
        out, err = check_output([bash, test_script])
        self.assertEqual(out, "".encode())
        self.assertEqual(err, "".encode())
@requireVenvCreate
class EnsurePipTest(BaseTest):
    """Test venv module installation of pip."""

    def assert_pip_not_installed(self):
        envpy = os.path.join(os.path.realpath(self.env_dir),
                             self.bindir, self.exe)
        out, err = check_output([envpy, '-c',
            'try:\n import pip\nexcept ImportError:\n print("OK")'])
        # We force everything to text, so unittest gives the detailed diff
        # if we get unexpected results
        err = err.decode("latin-1")  # Force to text, prevent decoding errors
        self.assertEqual(err, "")
        out = out.decode("latin-1")  # Force to text, prevent decoding errors
        self.assertEqual(out.strip(), "OK")

    def test_no_pip_by_default(self):
        rmtree(self.env_dir)
        self.run_with_capture(venv.create, self.env_dir)
        self.assert_pip_not_installed()

    def test_explicit_no_pip(self):
        rmtree(self.env_dir)
        self.run_with_capture(venv.create, self.env_dir, with_pip=False)
        self.assert_pip_not_installed()

    def test_devnull(self):
        # Fix for issue #20053 uses os.devnull to force a config file to
        # appear empty. However http://bugs.python.org/issue20541 means
        # that doesn't currently work properly on Windows. Once that is
        # fixed, the "win_location" part of test_with_pip should be restored
        with open(os.devnull, "rb") as f:
            self.assertEqual(f.read(), b"")
        # Issue #20541: os.path.exists('nul') is False on Windows
        if os.devnull.lower() == 'nul':
            self.assertFalse(os.path.exists(os.devnull))
        else:
            self.assertTrue(os.path.exists(os.devnull))
    def do_test_with_pip(self, system_site_packages):
        rmtree(self.env_dir)
        with EnvironmentVarGuard() as envvars:
            # pip's cross-version compatibility may trigger deprecation
            # warnings in current versions of Python. Ensure related
            # environment settings don't cause venv to fail.
            envvars["PYTHONWARNINGS"] = "e"
            # ensurepip is different enough from a normal pip invocation
            # that we want to ensure it ignores the normal pip environment
            # variable settings. We set PIP_NO_INSTALL here specifically
            # to check that ensurepip (and hence venv) ignores it.
            # See http://bugs.python.org/issue19734
            envvars["PIP_NO_INSTALL"] = "1"
            # Also check that we ignore the pip configuration file
            # See http://bugs.python.org/issue20053
            with tempfile.TemporaryDirectory() as home_dir:
                envvars["HOME"] = home_dir
                bad_config = "[global]\nno-install=1"
                # Write to both config file names on all platforms to reduce
                # cross-platform variation in test code behaviour
                win_location = ("pip", "pip.ini")
                posix_location = (".pip", "pip.conf")
                # Skips win_location due to http://bugs.python.org/issue20541
                for dirname, fname in (posix_location,):
                    dirpath = os.path.join(home_dir, dirname)
                    os.mkdir(dirpath)
                    fpath = os.path.join(dirpath, fname)
                    with open(fpath, 'w') as f:
                        f.write(bad_config)
                    # Actually run the create command with all that unhelpful
                    # config in place to ensure we ignore it
                    try:
                        self.run_with_capture(venv.create, self.env_dir,
                                              system_site_packages=system_site_packages,
                                              with_pip=True)
                    except subprocess.CalledProcessError as exc:
                        # The output this produces can be a little hard to read,
                        # but at least it has all the details
                        details = exc.output.decode(errors="replace")
                        msg = "{}\n\n**Subprocess Output**\n{}"
                        self.fail(msg.format(exc, details))
        # Ensure pip is available in the virtual environment
        envpy = os.path.join(os.path.realpath(self.env_dir), self.bindir, self.exe)
        # Ignore DeprecationWarning since pip code is not part of Python
        out, err = check_output([envpy, '-W', 'ignore::DeprecationWarning', '-I',
                                 '-m', 'pip', '--version'])
        # We force everything to text, so unittest gives the detailed diff
        # if we get unexpected results
        err = err.decode("latin-1")  # Force to text, prevent decoding errors
        self.assertEqual(err, "")
        out = out.decode("latin-1")  # Force to text, prevent decoding errors
        expected_version = "pip {}".format(ensurepip.version())
        self.assertEqual(out[:len(expected_version)], expected_version)
        env_dir = os.fsencode(self.env_dir).decode("latin-1")
        self.assertIn(env_dir, out)

        # http://bugs.python.org/issue19728
        # Check the private uninstall command provided for the Windows
        # installers works (at least in a virtual environment)
        with EnvironmentVarGuard() as envvars:
            out, err = check_output([envpy,
                                     '-W', 'ignore::DeprecationWarning', '-I',
                                     '-m', 'ensurepip._uninstall'])
        # We force everything to text, so unittest gives the detailed diff
        # if we get unexpected results
        err = err.decode("latin-1")  # Force to text, prevent decoding errors
        # Ignore the warning:
        #   "The directory '$HOME/.cache/pip/http' or its parent directory
        #    is not owned by the current user and the cache has been disabled.
        #    Please check the permissions and owner of that directory. If
        #    executing pip with sudo, you may want sudo's -H flag."
        # where $HOME is replaced by the HOME environment variable.
        err = re.sub("^(WARNING: )?The directory .* or its parent directory "
                     "is not owned by the current user .*$", "",
                     err, flags=re.MULTILINE)
        self.assertEqual(err.rstrip(), "")
        # Being fairly specific regarding the expected behaviour for the
        # initial bundling phase in Python 3.4. If the output changes in
        # future pip versions, this test can likely be relaxed further.
        out = out.decode("latin-1")  # Force to text, prevent decoding errors
        self.assertIn("Successfully uninstalled pip", out)
        self.assertIn("Successfully uninstalled setuptools", out)
        # Check pip is now gone from the virtual environment. This only
        # applies in the system_site_packages=False case, because in the
        # other case, pip may still be available in the system site-packages
        if not system_site_packages:
            self.assert_pip_not_installed()

    # Issue #26610: pip/pep425tags.py requires ctypes
    @unittest.skipUnless(ctypes, 'pip requires ctypes')
    @requires_zlib
    def test_with_pip(self):
        self.do_test_with_pip(False)
        self.do_test_with_pip(True)


if __name__ == "__main__":
    unittest.main()
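For orientation, the API exercised by this suite is driven the same way outside the tests; a minimal sketch, where the target directory is arbitrary:

# Create a bare environment programmatically, mirroring what the tests do.
import venv

builder = venv.EnvBuilder(clear=True, symlinks=False, with_pip=False)
builder.create("/tmp/demo-venv")  # writes bin/ (or Scripts/), pyvenv.cfg, ...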
| [
"[email protected]"
] | |
4d51c21e128583698658ed9af9d417243d4275fe | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /raw_scripts/132.230.102.123-10.21.11.29/1569578047.py | 23acb9e0de056e6062a2453e8fecf4cce4d3df5d | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,831 | py |

import functools
import typing
import string
import random
import pytest


# Solution part 1.
def list_filter(x, xs):
    """
    x: Int
    xs: List
    Returns the values of the list that are less than or equal to x."""
    L = []
    for i in xs:
        if i <= x:
            L.append(i)
    return L
######################################################################
## hidden code
def mk_coverage():
    covered = set()
    target = set(range(6))
    count = 0

    def coverage(func):
        nonlocal covered, target, count

        def wrapper(x, xs):
            nonlocal covered, count
            if xs == []:
                covered.add(0)
            if len(xs) == 1:
                covered.add(1)
            if len(xs) > 1:
                covered.add(2)
            if x in xs:
                covered.add(3)
            if len([y for y in xs if y < x]):
                covered.add(4)
            if len([y for y in xs if y > x]):
                covered.add(5)
            r = func(x, xs)
            count += 1
            return r

        if func == "achieved": return len(covered)
        if func == "required": return len(target)
        if func == "count": return count

        if func.__doc__:
            wrapper.__doc__ = func.__doc__
        wrapper.__hints__ = typing.get_type_hints(func)
        return wrapper

    return coverage

coverage = mk_coverage()

try:
    list_filter = coverage(list_filter)
except:
    pass
# Solution part 2. (Test)
def test_list_filter():
    assert (list_filter(3, [1, 2, 3, 4, 5, 6]) == [1, 2, 3])
    assert (list_filter(-1, [-2, -1, 0, 1]) == [-2, -1])
    assert (list_filter(1, [2, 3, 4, 5]) == [])
######################################################################
## hidden tests
pytest.main(["-v", "--assert=plain", "-p", "no:cacheprovider"])

from inspect import getfullargspec


class TestNames:
    def test_list_filter(self):
        assert list_filter
        assert 'x' in getfullargspec(list_filter).args
        assert 'xs' in getfullargspec(list_filter).args


class TestGrades:
    def test_docstring_present(self):
        assert list_filter.__doc__ is not None

    def test_typing_present(self):
        assert list_filter.__hints__ == typing.get_type_hints(self.list_filter_oracle)

    def test_coverage(self):
        assert coverage("achieved") == coverage("required")

    def list_filter_oracle(self, x: int, xs: list) -> list:
        return [y for y in xs if y <= x]

    def check_filter(self, x, xs):
        assert list_filter(x, xs) == self.list_filter_oracle(x, xs)

    def test_correctness(self):
        for i in range(100):
            l = random.randrange(6)
            xs = [random.randrange(10) for z in range(l)]
            x = random.randrange(10)
            self.check_filter(x, xs)
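A standalone sanity check, independent of the pytest machinery above; the expected output follows from the visible tests:

# Mirrors the visible assertions and shows the hidden coverage counters.
print(list_filter(3, [1, 2, 3, 4, 5, 6]))                # [1, 2, 3]
print(coverage("achieved"), "of", coverage("required"))  # branches hit so far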
| [
"[email protected]"
] | |
10992b85ca307716f9540574e49c699529f6575f | 59b18dec434fc54cdaf6fd6c224fea9c783f2043 | /MyBlog/comments/migrations/0001_initial.py | a6c7e5dfd249072e98c81326c4d5ee7adbb88c97 | [] | no_license | InformationX/MyBlog | 00e95f27a78be39338fbaa462b1fa069cdfad3e6 | 2a5b15535c17b0eee0d1fa9bcebc5f7207dd46db | refs/heads/master | 2021-04-10T00:54:09.002419 | 2019-10-09T11:56:24 | 2019-10-09T11:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py |

# Generated by Django 2.1.4 on 2019-06-13 09:19

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('blog', '0004_auto_20190613_1527'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField(verbose_name='正文')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('last_mod_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Article', verbose_name='文章')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
                ('parent_comment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='comments.Comment', verbose_name='上级评论')),
            ],
            options={
                'verbose_name': '评论',
                'verbose_name_plural': '评论',
                'ordering': ['created_time'],
            },
        ),
    ]
| [
"[email protected]"
] | |
ea7744864101673132321095636165fa4a0cbc9b | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/api/dataflow/modeling/model/model_serializer.py | 8e578fb7093c3752167b0baac4b745d5530625d1 | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 4,831 | py |

# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from rest_framework import serializers
class CreateModelSerializer(serializers.Serializer):
    """
    {
        "last_sql": "create table xxx run model xxx",
        "project_id": 123,
        "model_name": "abc_model",
        "model_alias": "阿彼此",
        "is_public": True,
        "description": "........",
        "result_table_ids": ['xx', 'xxx'],
    }
    """

    last_sql = serializers.CharField(required=True, label="the last sql in the cell")
    project_id = serializers.IntegerField(required=True, label="project id")
    model_name = serializers.CharField(required=True, label="basic model")
    model_alias = serializers.CharField(required=False, label="model alias", default=None)
    is_public = serializers.BooleanField(required=False, label="is it public", default=False)
    description = serializers.CharField(
        required=False,
        label="description",
        allow_null=True,
        allow_blank=True,
        default=None,
    )
    result_table_ids = serializers.ListField(required=True, label="The result tables of the current note")
    experiment_name = serializers.CharField(required=False, label="experiment name")
    experiment_id = serializers.IntegerField(required=False, label="experiment id", default=None)
    evaluation_result = serializers.DictField(required=False, label="model evaluation information", default={})
    notebook_id = serializers.IntegerField(required=False, label="model notebook id", default=0)


class InspectionBeforeRelease(serializers.Serializer):
    """
    {
        "last_sql": "create table xxx run model xxx",
    }
    """

    sql = serializers.CharField(required=True, label="sql block in the cell")


class UpdateModelSerializer(serializers.Serializer):
    """
    {
        "last_sql": "create table xxx run model xxx",
        "project_id": 123,
        "description": "........",
        "result_table_ids": ['xx', 'xxx'],
    }
    """

    last_sql = serializers.CharField(required=True, label="the last sql in the cell")
    project_id = serializers.IntegerField(required=True, label="project id")
    description = serializers.CharField(required=False, label="description", allow_null=True, allow_blank=True)
    result_table_ids = serializers.ListField(required=True, label="The result tables of the current note")
    experiment_name = serializers.CharField(required=False, label="experiment name", allow_null=True, allow_blank=True)
    experiment_id = serializers.IntegerField(required=False, label="experiment id", default=None, allow_null=True)
    evaluation_result = serializers.DictField(required=False, label="model evaluation information", default={})


class GetAlgorithmListSerializer(serializers.Serializer):
    """
    {
        'framework': 'spark_mllib'
    }
    """

    framework = serializers.CharField(required=False, label="algorithm frame")


class GetReleaseResultSerializer(serializers.Serializer):
    """
    {
        "task_id": 123
    }
    """

    task_id = serializers.IntegerField(required=True, label="release task id")


class GetProjectReleaseSerializer(serializers.Serializer):
    """
    {
        "project_id": 123
    }
    """

    project_id = serializers.IntegerField(required=True, label="project id")


class GetUpdateReleaseSerializer(serializers.Serializer):
    """
    {
        "project_id": 123
    }
    """

    project_id = serializers.IntegerField(required=True, label="project id")
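A minimal sketch of how one of these serializers is typically driven; the payload values are placeholders shaped like the docstring examples:

# Hypothetical payload -- field values are placeholders.
payload = {
    "last_sql": "create table xxx run model xxx",
    "project_id": 123,
    "model_name": "abc_model",
    "result_table_ids": ["xx", "xxx"],
}
serializer = CreateModelSerializer(data=payload)
print(serializer.is_valid())      # True when all required fields validate
print(serializer.validated_data)  # cleaned values with defaults applied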
| [
"[email protected]"
] | |
f2febac8f4268f36933396a59726a5d1d8eaee71 | 31747dd8c61085421d7bd4166f7bd4f9429cf914 | /tests/test_visitors/test_ast/test_naming/conftest.py | c365a79530f1db42a9abe5b5ddc0d39fee60ac86 | [
"MIT"
] | permissive | edytagarbarz/wemake-python-styleguide | 0e9ed4080a13a6727b8e80785e113b8407409352 | 74b86156d73c2a4fe9c755138f6953fec41fab3b | refs/heads/master | 2021-03-03T19:21:54.807089 | 2020-03-07T23:35:15 | 2020-03-07T23:35:15 | 245,981,718 | 1 | 1 | MIT | 2020-03-09T08:31:55 | 2020-03-09T08:31:54 | null | UTF-8 | Python | false | false | 4,646 | py |

import pytest
from wemake_python_styleguide.compat.constants import PY38
# Imports:
import_alias = """
import os as {0}
"""
from_import_alias = """
from os import path as {0}
"""
# Function names:
function_name = 'def {0}(): ...'
method_name = """
class Input(object):
    def {0}(self): ...
"""

# Function arguments:

function_argument = 'def test(arg, {0}): ...'

method_argument = """
class Input(object):
    def validate(self, {0}): ...
"""

function_keyword_argument = 'def test(arg, {0}=None): ...'

method_keyword_argument = """
class Input(object):
    def validate(self, {0}=None): ...
"""

function_args_argument = 'def test(arg, *{0}): ...'
function_kwargs_argument = 'def test(arg, **{0}): ...'

method_args_argument = """
class Input(object):
    def validate(self, *{0}): ...
"""

method_kwargs_argument = """
class Input(object):
    def validate(self, **{0}): ...
"""

function_posonly_argument = """
def test({0}, /): ...
"""

function_kwonly_argument = """
def test(*, {0}): ...
"""

function_kwonly_default_argument = """
def test(*, {0}=True): ...
"""

method_kwonly_argument = """
class Input(object):
    def test(self, *, {0}=True): ...
"""
lambda_argument = 'lambda {0}: ...'
# Class attributes:
static_attribute = """
class Test:
    {0} = None
"""

static_typed_attribute = """
class Test:
    {0}: int = None
"""

static_typed_annotation = """
class Test:
    {0}: int
"""

instance_attribute = """
class Test(object):
    def __init__(self):
        self.{0} = 123
"""

instance_typed_attribute = """
class Test(object):
    def __init__(self):
        self.{0}: int = 123
"""
# Variables:
variable_def = """
{0} = 'test'
"""
variable_typed_def = """
{0}: str = 'test'
"""
variable_typed = """
{0}: str
"""
# See: https://github.com/wemake-services/wemake-python-styleguide/issues/405
unpacking_variables = """
first.attr, {0} = range(2)
"""
unpacking_star_variables = """
first, *{0} = range(2)
"""
for_variable = """
def container():
    for {0} in []:
        ...
"""

for_star_variable = """
def container():
    for index, *{0} in []:
        ...
"""

with_variable = """
def container():
    with open('test.py') as {0}:
        ...
"""

with_star_variable = """
def container():
    with open('test.py') as (first, *{0}):
        ...
"""

exception = """
try:
    1 / 0
except Exception as {0}:
    raise
"""
# Fixtures:
_ALL_FIXTURES = frozenset((
    # Imports:
    import_alias,
    from_import_alias,

    # Function names, we don't use async function because we generate them:
    function_name,
    method_name,

    # Function arguments:
    function_argument,
    method_argument,
    function_keyword_argument,
    method_keyword_argument,
    function_args_argument,
    function_kwargs_argument,
    method_args_argument,
    method_kwargs_argument,
    function_kwonly_argument,
    function_kwonly_default_argument,
    method_kwonly_argument,
    lambda_argument,

    # Class attributes:
    static_attribute,
    static_typed_attribute,
    static_typed_annotation,
    instance_attribute,
    instance_typed_attribute,

    # Variables:
    variable_def,
    variable_typed_def,
    variable_typed,
    unpacking_variables,
    unpacking_star_variables,
    for_variable,
    for_star_variable,
    with_variable,
    with_star_variable,
    exception,
))

if PY38:
    _ALL_FIXTURES |= {function_posonly_argument}
_SUITABLE_FOR_UNUSED_TUPLE = frozenset((
    unpacking_variables,
    variable_def,
    with_variable,
))

_SUITABLE_FOR_UNUSED = _SUITABLE_FOR_UNUSED_TUPLE | frozenset((
    variable_typed_def,
    variable_typed,
    exception,
))
@pytest.fixture(params=_ALL_FIXTURES)
def naming_template(request):
    """Parametrized fixture that contains all possible naming templates."""
    return request.param


@pytest.fixture(params=_SUITABLE_FOR_UNUSED)
def forbidden_unused_template(request):
    """Returns template that can be used to define wrong unused variables."""
    return request.param


@pytest.fixture(params=_SUITABLE_FOR_UNUSED_TUPLE)
def forbidden_tuple_unused_template(request):
    """Returns template that can be used to define wrong unused tuples."""
    return request.param


@pytest.fixture(params=_SUITABLE_FOR_UNUSED | {
    static_attribute,
    static_typed_attribute,
    static_typed_annotation,
})
def forbidden_raw_unused_template(request):
    """Returns template that can be used to define wrong unused tuples."""
    return request.param


@pytest.fixture(params=_ALL_FIXTURES - _SUITABLE_FOR_UNUSED)
def allowed_unused_template(request):
    """Returns template that can define unused variables."""
    return request.param
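A sketch of how such a parametrized fixture is consumed; pytest runs the test once per template, so one assertion covers every naming position:

# Hypothetical consumer test: `naming_template` is injected by pytest and
# formatted with a concrete (here deliberately wrong) identifier.
def test_wrong_name_is_embedded(naming_template):
    code = naming_template.format('wrongName')
    assert 'wrongName' in code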
| [
"[email protected]"
] | |
e80bab55c48ceccf23ecc4e67e62307d15f29969 | 929d12e11ed2fb69476b9d07932e38662f0ce6fc | /Queues/Reversing Elements Of Queue.py | 468388212a79b70bd4c6859d78945c5be3c53996c | [] | no_license | arnabs542/Data-Structures-And-Algorithms | b8f341a31ca18044bf179294fbcb0fac1f835216 | ffcc2f8a25520ce37cd1f67e6225281c85141a65 | refs/heads/master | 2022-12-13T14:09:55.005341 | 2020-09-13T11:58:58 | 2020-09-13T11:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py |

"""
Reversing Elements Of Queue
Problem Description
Given an array of integers A and an integer B. We need to reverse the order of the first B elements of the array, leaving the other elements in the same relative order.
NOTE: You are required to first insert elements into an auxiliary queue then perform Reversal of first B elements.
Problem Constraints
1 <= B <= length of the array <= 200000
1 <= A[i] <= 100000
Input Format
The argument given is the integer array A and an integer B.
Output Format
Return an array of integer after reversing the first B elements of A using queue.
Example Input
Input 1:
A = [1, 2, 3, 4, 5]
B = 3
Input 2:
A = [5, 17, 100, 11]
B = 2
Example Output
Output 1:
[3, 2, 1, 4, 5]
Output 2:
[17, 5, 100, 11]
Example Explanation
Explanation 1:
Reverse first 3 elements so the array becomes [3, 2, 1, 4, 5]
Explanation 2:
Reverse first 2 elements so the array becomes [17, 5, 100, 11]
"""
from collections import deque
class Solution:
# @param A : list of integers
# @param B : integer
# @return a list of integers
def solve(self, A, B):
helper = deque()
for i in range(B):
helper.append(A[i])
index = B - 1
while len(helper) != 0:
temp = helper.popleft()
A[index] = temp
index -= 1
return A
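The solution reproduces the examples from the problem statement:

# Example runs matching the docstring above.
solution = Solution()
print(solution.solve([1, 2, 3, 4, 5], 3))   # [3, 2, 1, 4, 5]
print(solution.solve([5, 17, 100, 11], 2))  # [17, 5, 100, 11]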
| [
"[email protected]"
] | |
c541305cafe96c7c8667b45be1618611178980d9 | 23ec357d5df7addf06cb70c10ba9173521c70a9b | /core/migrations/0018_auto_20210621_0048.py | 9a7276bde29db5d951039b362455c331c431ca68 | [] | no_license | blimp666/d_job | b8e8b93ef6b94e24a38bd94195a779bfff7f3c30 | 18904ac12af6593bf59b1ba379f722bd69d00863 | refs/heads/main | 2023-06-07T21:50:34.596128 | 2021-06-22T11:15:20 | 2021-06-23T19:36:48 | 376,893,878 | 0 | 0 | null | 2021-06-15T19:30:46 | 2021-06-14T16:48:17 | Python | UTF-8 | Python | false | false | 911 | py |

# Generated by Django 3.2.4 on 2021-06-21 00:48

import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0017_auto_20210619_1206'),
    ]

    operations = [
        migrations.AlterField(
            model_name='application',
            name='file',
            field=models.FileField(default='', upload_to='app_files', verbose_name='Работа'),
        ),
        migrations.AlterField(
            model_name='conference',
            name='date_start',
            field=models.DateField(default=datetime.datetime(2021, 6, 21, 0, 48, 43, 294739), verbose_name='Дата проведения'),
        ),
        migrations.AlterField(
            model_name='conference',
            name='file',
            field=models.FileField(default='', upload_to='conf_files', verbose_name='Вложения'),
        ),
    ]
| [
"[email protected]"
] | |
f301d8c1426299d986f583448ef5069d417a5f45 | 30a2f77f5427a3fe89e8d7980a4b67fe7526de2c | /analyze/BHistograms_trigjetht_eta1p7_CSVM_cfg.py | 4ae59fa9d3b2efa57d0420a6fb2788196d1bd3763 | [] | no_license | DryRun/QCDAnalysis | 7fb145ce05e1a7862ee2185220112a00cb8feb72 | adf97713956d7a017189901e858e5c2b4b8339b6 | refs/heads/master | 2020-04-06T04:23:44.112686 | 2018-01-08T19:47:01 | 2018-01-08T19:47:01 | 55,909,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,788 | py |

import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
import sys

options = VarParsing.VarParsing()
options.register('inputFiles',
    '/uscms/home/dryu/eosdir/BJetPlusX/QCDBEventTree_BJetPlusX_Run2012B_v1_3/160429_121519/0000/QCDBEventTree_567.root',
    VarParsing.VarParsing.multiplicity.list,
    VarParsing.VarParsing.varType.string,
    "List of input files"
)
options.register('outputFile',
    'BHistograms_trigjetht_CSVL.root',
    VarParsing.VarParsing.multiplicity.singleton,
    VarParsing.VarParsing.varType.string,
    "Output file"
)
options.register('dataSource',
    'collision_data',
    VarParsing.VarParsing.multiplicity.singleton,
    VarParsing.VarParsing.varType.string,
    'collision_data or simulation'
)
options.register('dataType',
    'data',
    VarParsing.VarParsing.multiplicity.singleton,
    VarParsing.VarParsing.varType.string,
    'data, signal, or background'
)
options.register('signalMass',
    750.,
    VarParsing.VarParsing.multiplicity.singleton,
    VarParsing.VarParsing.varType.float,
    'Signal mass hypothesis (only necessary for running over signal)'
)
options.parseArguments()

if options.dataSource != "collision_data" and options.dataSource != "simulation":
    print "[BHistograms_BJetPlusX_loose] ERROR : dataSource must be collision_data or simulation"
    sys.exit(1)
if not options.dataType in ["data", "signal", "background"]:
    print "[BHistograms_BJetPlusX_loose] ERROR : dataType must be data, signal, or background"
    sys.exit(1)
process = cms.Process("myprocess")
process.TFileService=cms.Service("TFileService",fileName=cms.string(options.outputFile))
##-------------------- Define the source ----------------------------
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
##-------------------- Cuts ------------------------------------------
# Cuts on the leading two jets
dijet_cuts = cms.VPSet(
    cms.PSet(
        name = cms.string("MinPt"),
        parameters = cms.vdouble(30.),
        descriptors = cms.vstring()
    ),
    cms.PSet(
        name = cms.string("MaxAbsEta"),
        parameters = cms.vdouble(1.7),
        descriptors = cms.vstring()
    ),
    cms.PSet(
        name = cms.string("IsTightID"),
        parameters = cms.vdouble(),
        descriptors = cms.vstring()
    ),
    cms.PSet(
        name = cms.string("MaxMuonEnergyFraction"),
        parameters = cms.vdouble(0.8),
        descriptors = cms.vstring()
    ),
)
# Cuts on all PF jets (defines the generic jet collection for e.g. making fat jets)
pfjet_cuts = cms.VPSet(
    cms.PSet(
        name = cms.string("MinPt"),
        parameters = cms.vdouble(30.),
        descriptors = cms.vstring()
    ),
    cms.PSet(
        name = cms.string("MaxAbsEta"),
        parameters = cms.vdouble(5),
        descriptors = cms.vstring()
    ),
    cms.PSet(
        name = cms.string("IsLooseID"),
        parameters = cms.vdouble(),
        descriptors = cms.vstring()
    ),
)
# Cuts on calo jets
calojet_cuts = cms.VPSet(
    cms.PSet(
        name = cms.string("MinPt"),
        parameters = cms.vdouble(30.),
        descriptors = cms.vstring()
    )
)
# Event cuts
event_cuts = cms.VPSet(
    cms.PSet(
        name = cms.string("TriggerOR"),
        parameters = cms.vdouble(),
        descriptors = cms.vstring("HLT_PFHT650_v5", "HLT_PFHT650_v6", "HLT_PFHT650_v7", "HLT_PFHT650_v8", "HLT_PFHT650_v9", "HLT_PFNoPUHT650_v1", "HLT_PFNoPUHT650_v3", "HLT_PFNoPUHT650_v4", "HLT_HT750_v1", "HLT_HT750_v2", "HLT_HT750_v3", "HLT_HT750_v4", "HLT_HT750_v5", "HLT_HT750_v7")
    ),
    cms.PSet(
        name = cms.string("MaxMetOverSumEt"),
        parameters = cms.vdouble(0.5),
        descriptors = cms.vstring()
    ),
    cms.PSet(
        name = cms.string("GoodPFDijet"),
        parameters = cms.vdouble(),
        descriptors = cms.vstring()
    ),
    cms.PSet(
        name = cms.string("MinNCSVM"),
        parameters = cms.vdouble(2),
        descriptors = cms.vstring()
    ),
    cms.PSet(
        name = cms.string("MinLeadingPFJetPt"),
        parameters = cms.vdouble(80.),
        descriptors = cms.vstring()
    ),
    cms.PSet(
        name = cms.string("MinSubleadingPFJetPt"),
        parameters = cms.vdouble(70.),
        descriptors = cms.vstring()
    ),
    cms.PSet(
        name = cms.string("PFDijetMaxDeltaEta"),
        parameters = cms.vdouble(1.3),
        descriptors = cms.vstring()
    )
)
##-------------------- User analyzer --------------------------------
process.BHistograms = cms.EDAnalyzer('BHistograms',
file_names = cms.vstring(options.inputFiles),
tree_name = cms.string('ak5/ProcessedTree'),
trigger_histogram_name = cms.string('ak5/TriggerNames'),
#triggers = cms.vstring('HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v2:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v3:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v4:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v5:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v7:L1_DoubleJetC36'),
#triggers = cms.vstring( 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v2:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v3:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v4:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v5:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v7:L1_SingleJet128'),
data_source = cms.string(options.dataSource),
data_type = cms.string(options.dataType),
signal_mass = cms.double(options.signalMass),
max_events = cms.int32(-1),
dijet_cuts = dijet_cuts,
pfjet_cuts = pfjet_cuts,
calojet_cuts = calojet_cuts,
event_cuts = event_cuts,
fatjet_delta_eta_cut = cms.double(1.1),
btag_wp_1 = cms.string('CSVM'),
btag_wp_2 = cms.string('CSVM'),
)
process.p = cms.Path(process.BHistograms)
| [
"[email protected]"
] | |
b3573574645dab4a14085dbbfc1ceed3c185f247 | 231f8a898b20e475a5cabff439600de211d825c0 | /superlists/urls.py | 1b817a2f4121136d79814f362f95857a779149c2 | [
"MIT"
] | permissive | thewchan/superlists | f7370b341ce7c37b8cae506eb5bafdd2fb31b07a | af41636b2cdafb45c638e36076b9cdefc5586aad | refs/heads/master | 2023-05-26T11:01:24.310480 | 2021-06-11T21:12:20 | 2021-06-11T21:12:20 | 361,209,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | """superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from accounts import urls as accounts_urls
from lists import views as list_views, urls as list_urls
urlpatterns = [
url(r"^$", list_views.home_page, name="home"),
url(r"^lists/", include(list_urls)),
url(r"^accounts/", include(accounts_urls)),
]
| [
"[email protected]"
] | |
3b6cce24b6bf09f2d048fa65cbaea2b432e16d92 | 4dda597dac544b237cf8f8b04b3c9e662b988a92 | /11-1/mydata/applitions/views.py | 8f8ffa4a104ff5767a5aa416a9ebb19e76647a2d | [] | no_license | beriuta/history | 02ac9414c0475fde59f6a455c23c56235fe3c4bc | 026c965f694e84120825069bedf7bfac235318b5 | refs/heads/master | 2020-04-11T17:24:39.558174 | 2018-12-16T02:08:18 | 2018-12-16T02:08:18 | 161,959,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,618 | py | from django.shortcuts import render
from datetime import datetime,timedelta
# Create your views here.
def t(request):
name = '测试'
d1 = {'name': '小鬼', 'age': 18, 'hobby': 'eat', 'items': 'hello'}
class People:
def __init__(self, name, age):
self.name = name
self.age = age
@staticmethod
def dream():
return 'Write the code,Change the world'
p5 = People('小懒虫', 1)
p1 = People('小明', 23)
p2 = People('小红', 13)
p3 = People('小白', 15)
list1 = [p1, p2, p3]
list2 = ['深圳', '上海', '北京', '广州', '东莞', '威海', '青岛', '潍坊']
return render(request, 't.html', {'name': name, 'd1': d1, 'f5': p5, 'list': list1, 'list2': list2})
def s(request):
l = [11, 32, 73]
name = '小懒虫'
class Food:
def __init__(self, name, kg):
self.name = name
self.kg = kg
        # @staticmethod  # when a function never uses self, this decorator can be added above it
def dream(self):
            return '{}的梦想:世界唯有美食不可辜负!'.format(self.name)  # use return, not print: print would write to the console and nothing would show on the page
duck = Food('烤鸭', 2)
pig = Food('烤猪', 50)
sheep = Food('烤全羊', 30)
chicken = Food('炸鸡', 23)
lst = [pig, sheep, chicken]
return render(request, 's.html',
{
'l': l,
'name': name,
'food': duck,
'lst': lst
}
)
# template filter syntax examples
def m(request):
name = 'Beriuta'
file_size = 10000
a = '<a href="https://www.baidu.com">百度</a>'
p = '在苍茫的大海上,狂风卷积着乌云,在乌云和大海之间,海燕像黑色的闪电,在高傲地飞翔!'
p_1 = '在 苍 茫 的 大 海 上,狂风卷积着乌云,在乌云和大海之间,海燕像黑色的闪电,在高傲地飞翔!'
p_2 = 'aaabsbshsasjahahaayaha'
    now = datetime.now()  # get the current time as a datetime object
list1 = ['huhu','hehe','didi','shil','sb']
    # get the time from four hours ago
hours = now - timedelta(hours=4)
return render(request, 'm.html',
{'name': name,
'file_size':file_size,
'now':now,
'a':a,
'p':p,
'p_1':p_1,
'p_2':p_2,
'list1':list1,
'hours':hours
}
)
| [
"[email protected]"
] | |
597fbc69d4aed8fb08abfb60305685cb6ea0b27e | 5db3009eb36afe7110ed5402be3a9e570c58c540 | /my_plugins/YouCompleteMe/third_party/ycmd/third_party/jedi_deps/jedi/test/completion/import_tree/flow_import.py | a0a779ecab8b9a6a6806a92a54c8aa4a01ba5453 | [
"GPL-3.0-only",
"GPL-1.0-or-later",
"MIT"
] | permissive | imfangli/vimrc | ced2c6caece1cf19421c6ea7deb017bec4ca3a27 | d2d14e7d083d70cc8627ddccb5b99c53c3c38be3 | refs/heads/master | 2022-02-01T00:34:31.855421 | 2022-01-22T15:57:28 | 2022-01-22T15:57:28 | 211,766,038 | 2 | 0 | MIT | 2019-09-30T03:15:03 | 2019-09-30T03:15:02 | null | UTF-8 | Python | false | false | 39 | py | if name:
env = 1
else:
env = 2
| [
"[email protected]"
] | |
676e52de08ecbf08fbb59988ca58614c255892d8 | 5b4312ddc24f29538dce0444b7be81e17191c005 | /autoware.ai/1.12.0/devel/.private/vector_map_msgs/lib/python2.7/dist-packages/vector_map_msgs/msg/_RailCrossingArray.py | c5485493b9388cf7c0c77f72f2f28ac09a8f6105 | [
"MIT"
] | permissive | muyangren907/autoware | b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2 | 5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38 | refs/heads/master | 2020-09-22T13:08:14.237380 | 2019-12-03T07:12:49 | 2019-12-03T07:12:49 | 225,167,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,277 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from vector_map_msgs/RailCrossingArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import vector_map_msgs.msg
import std_msgs.msg
class RailCrossingArray(genpy.Message):
_md5sum = "62d7f260c71b469b058ab28f3bce2ded"
_type = "vector_map_msgs/RailCrossingArray"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
RailCrossing[] data
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: vector_map_msgs/RailCrossing
# Ver 1.00
int32 id
int32 aid
int32 linkid
"""
__slots__ = ['header','data']
_slot_types = ['std_msgs/Header','vector_map_msgs/RailCrossing[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,data
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(RailCrossingArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = []
else:
self.header = std_msgs.msg.Header()
self.data = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
for val1 in self.data:
_x = val1
buff.write(_get_struct_3i().pack(_x.id, _x.aid, _x.linkid))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.data = []
for i in range(0, length):
val1 = vector_map_msgs.msg.RailCrossing()
_x = val1
start = end
end += 12
(_x.id, _x.aid, _x.linkid,) = _get_struct_3i().unpack(str[start:end])
self.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
for val1 in self.data:
_x = val1
buff.write(_get_struct_3i().pack(_x.id, _x.aid, _x.linkid))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.data = []
for i in range(0, length):
val1 = vector_map_msgs.msg.RailCrossing()
_x = val1
start = end
end += 12
(_x.id, _x.aid, _x.linkid,) = _get_struct_3i().unpack(str[start:end])
self.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_3i = None
def _get_struct_3i():
global _struct_3i
if _struct_3i is None:
_struct_3i = struct.Struct("<3i")
return _struct_3i
| [
"[email protected]"
] | |
00465e54f148270106631a6302c36d295425dfe7 | 4e678f6967745c3da2a3e8899204e34d99fc3be0 | /python-quest-1.py | 6e6922acc790a0eff4ffcedd901c28ea8e2f4366 | [] | no_license | pratikshah1701/hackerrank | bf689a583b462c0fde697abd536ae0638d14ced9 | 73fde2a0d9dc063e1c06d42f0572ce01f5dd04b9 | refs/heads/master | 2021-01-19T18:47:15.278811 | 2017-04-13T09:16:27 | 2017-04-13T09:16:27 | 88,381,516 | 1 | 1 | null | 2017-04-16T00:20:00 | 2017-04-16T00:20:00 | null | UTF-8 | Python | false | false | 226 | py | #!/usr/bin/env python3
def main():
for i in range(1, int(input())): # More than 2 lines will result in 0 score. Do not leave a blank line also
print((10 ** i - 1) // 9 * i)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
6b6f314619290d1abeedd0e25f056113994f73bd | 4861398f925ae2cc01189577c71c4983fd7d097b | /kakao_().py | 88bd003daf8d8bcebc4b1e7d1320be8a73a7a17d | [] | no_license | fightnyy/programmers_algorithm | 1c682220aedc078f7a184109f06aa95f673d1d43 | 5efe26a12661f3d278bfcca66753ccfd10451002 | refs/heads/master | 2023-05-02T15:53:22.116662 | 2021-04-27T00:59:30 | 2021-04-27T00:59:30 | 328,570,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | def solution(p):
answer = ''
def _is_empty(inputs_e):
if inputs_e == "":
return ""
def _divide(inputs_d):
u, v = inputs_d[:2], inputs_d[2:]
return u,v
def _is_corret(inputs_c):
stack = []
for c in inputs_c:
if c == '(':
stack.append(c)
else:
if stack == []:
return False
else:
stack.pop()
if stack !=[]:
return False
return True
def _check(input_ch):
_is_empty(input_ch)
def _lets_4() :
'('+""
u, v = _divide(p):
_check(v) if _is_correct(u) _lets_4()
return answer | [
"[email protected]"
] | |
6ff0110e98ad5e642d3aed34dc2cf15987a7b382 | ad553dd718a8df51dabc9ba636040da740db57cf | /.history/app_20181209011747.py | f315e62e16bac6e29a0065fac970aaec00ade91b | [] | no_license | NergisAktug/E-Commerce-PythonWithFlask-Sqlite3 | 8e67f12c28b11a7a30d13788f8dc991f80ac7696 | 69ff4433aa7ae52ef854d5e25472dbd67fd59106 | refs/heads/main | 2023-01-01T14:03:40.897592 | 2020-10-19T20:36:19 | 2020-10-19T20:36:19 | 300,379,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,172 | py | import datetime
import sqlite3 as sql
from flask import Flask,flash, request, render_template_string, render_template
from flask import Flask, url_for, render_template, request, redirect, session, escape, render_template_string
from flask_babelex import Babel
from flask_sqlalchemy import SQLAlchemy
from flask_user import current_user, login_required, roles_required
from sqlalchemy.sql import table, column, select
from sqlalchemy import MetaData, create_engine
from flask_user import login_required, roles_required, UserManager, UserMixin
class ConfigClass(object):
SECRET_KEY = 'This is an INSECURE secret!! DO NOT use this in production!!'
SQLALCHEMY_DATABASE_URI = 'sqlite:///eticaret.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TLS = False
MAIL_USERNAME = '[email protected]'
MAIL_PASSWORD = '05383896877'
MAIL_DEFAULT_SENDER = '"MyApp" <[email protected]>'
USER_ENABLE_EMAIL = True
USER_ENABLE_USERNAME = False
USER_EMAIL_SENDER_EMAIL = "[email protected]"
def create_app():
app = Flask(__name__)
app.config.from_object(__name__ + '.ConfigClass')
db = SQLAlchemy(app)
class Kullanici(db.Model):
__tablename__ = 'Kullanici'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(80), unique=True)
sifre = db.Column(db.String(80))
rolId = db.Column(db.Integer, db.ForeignKey('rol.rolId', ondelete='CASCADE'))
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='1')
def __init__(self, email, sifre):
self.email = email
self.sifre = sifre
self.rolId = 0
class Roller(db.Model):
__tablename__ = 'rol'
rolId = db.Column(db.Integer, primary_key=True)
rolisim = db.Column(db.String(80))
class urunler(db.Model):
__tablename__ = 'urunler'
urun_id = db.Column(db.Integer, primary_key=True)
urunismi = db.Column(db.String(80))
urunresmi = db.Column(db.String(80))
urunFiyati = db.Column(db.Integer)
markaId = db.Column(db.Integer(), db.ForeignKey('markalar.markaId', ondelete='CASCADE'))
def __init__(self, urunismi, urunresmi, urunFiyati,markaId):
self.urunismi =urunismi
self.urunresmi = urunresmi
self.urunFiyati = urunFiyati
self.markaId=markaId
class markalar(db.Model):
__tablename__ = 'markalar'
markaId = db.Column(db.Integer, primary_key=True)
markaadi = db.Column(db.String(80))
marka_modeli = db.Column(db.String(80))
def __init__(self, markaadi, marka_modeli):
self.markaadi = markaadi
self.marka_modeli = marka_modeli
class musteri(db.Model):
__tablename__ = 'musteri'
musteriId = db.Column(db.Integer, primary_key=True)
musteriadi = db.Column(db.String(80))
musterisoyadi = db.Column(db.String(80))
mail = db.Column(db.String(80), unique=True)
telefon = db.Column(db.Integer)
sifre = db.Column(db.String(80))
il = db.Column(db.String(80))
ilce = db.Column(db.String(80))
kullaniciId = db.Column(db.Integer(), db.ForeignKey('Kullanici.id', ondelete='CASCADE'))
def __init__(self, musteriadi, musterisoyadi, mail, telefon, sifre, il, ilce, kullaniciId):
self.musteriadi = musteriadi
self.musterisoyadi = musterisoyadi
self.mail = mail
self.telefon = telefon
self.sifre = sifre
self.il = il
self.ilce = ilce
self.kullaniciId = kullaniciId
class siparis(db.Model):
__tablename__ = 'siparis'
siparisId = db.Column(db.Integer, primary_key=True)
musteriId = db.Column(db.Integer(), db.ForeignKey('musteri.musteriId', ondelete='CASCADE'))
urunId = db.Column(db.Integer(), db.ForeignKey('urunler.urun_id', ondelete='CASCADE'))
siparisno = db.Column(db.Integer)
siparisTarihi = db.Column(db.Integer)
odemeId = db.Column(db.Integer())
def __init__(self, musteriId, urunId, siparisno, siparisTarihi, odemeId):
self.musteriId = musteriId
self.urunId = urunId
self.siparisno = siparisno
self.siparisTarihi = siparisTarihi
self.odemeId = odemeId
db.create_all()
@app.route('/')
def anasayfa():
return render_template('index.html')
@app.route('/kayit', methods=['GET', 'POST'])
def kayit():
if request.method == 'POST':
mail = request.form['email']
parola = request.form['sifre']
yeniKullanici = Kullanici(email=mail, sifre=parola)
db.session.add(yeniKullanici)
db.session.commit()
if yeniKullanici is not None:
mesaj = "Kayıt Başarıyla Sağlanmıştır."
return render_template("index.html", mesaj=mesaj)
else:
return render_template('kayit.html')
@app.route('/admin')
def admin():
return render_template("admin.html")
@app.route('/uye', methods=['GET', 'POST'])
def uye():
return render_template("uyeGirisi.html")
@app.route('/giris', methods=['GET', 'POST'])
def giris():
hata=None
if request.method=='POST':
if request.form['email']!='[email protected]' or request.form['sifre']!='admin':
                if Kullanici.query.filter_by(email=request.form['email'],sifre=request.form['sifre']).first() is not None:
session['uye_giris']=True
return redirect(url_for('anasayfa'))
else:
hata='hatalı giris yaptınız'
else:
flash('giriş başarılı')
session['admin_giris']=True
return redirect(url_for('admin'))
return render_template('uyeGiris.html',hata=hata)
@app.route('/cikis')
def cikis():
session.pop('admin_giris',None)
session.pop('uye_giris',None)
return render_template("index.html")
@app.route('/urunEkle')
def urunEkle():
tumVeri=urunler.query.all()
return render_template("urunEkle.html",tumVeri=tumVeri)
@app.route('/urunEklemeYap',methods=['POST'])
def urunEklemeYap():
urunismi=request.form['urunismi']
urunResmi=request.form['urunresmi']
urunFiyati=request.form['fiyati']
markaId=request.form['markaId']
yeniUrun=urunler(urunismi=urunismi,urunresmi=urunResmi,urunFiyati=urunFiyati,markaId=markaId)
db.session.add(yeniUrun)
db.session.commit()
return redirect(url_for("urunEkle"))
@app.route("/sil/<string:id>")
def sil(id):
urun=urunler.query.filter_by(urun_id=id).first()
db.session.delete(urun)
db.session.commit()
return redirect(url_for("urunEkle"))
@app.route('/guncelle/<string:id>',methods=['POST','GET'])
def guncelle(id):
        if request.method=='POST':
            msg = "kayit guncellendi"  # default message for the success path
            try:
                urunismi=request.form['urunismi']
                urunresmi=request.form['urunresmi']
                fiyati=request.form['fiyati']
                markaId=request.form['markaId']
                with sql.connect("eticaret.sqlite") as con:
                    cur=con.cursor()
                    if urunler.query.filter_by(urun_id=id).first() is not None:
                        # the column defined on the model is urunFiyati, not fiyati
                        cur.execute("UPDATE urunler SET urunismi=?,urunresmi=?,urunFiyati=?,markaId=? WHERE urun_id=?",(urunismi,urunresmi,fiyati,markaId,id))
            except:
                con.rollback()
                msg="hata olustu"
            finally:
                con.close()
                return render_template("urunEkle.html",msg=msg)
@app.route('/Markalar')
def Markalar():
tumMarka=markalar.query.all()
return render_template("marka.html",tumMarka=tumMarka)
return app
if __name__ == '__main__':
app=create_app()
app.run(host='127.0.0.1', port=5000, debug=True) | [
"[email protected]"
] | |
47ec1eb674eb47c0d01660f188daff40943df5e2 | b5fb45288ed2a204692051ab78e72d8aa6e5accd | /argo_data_scripts/vis/vis_contrast.py | 51dc949c3ad92268e7c9143fa92182c57a7c0d8f | [
"Apache-2.0"
] | permissive | nithinksath96/MMdetection_TensorRT_FP16 | d4987f003798f5d6d4fe5bde2f30dd5ee2e8596d | c8379b209d4deeff9350baf5bbedfc95fb8941f4 | refs/heads/master | 2023-02-13T20:00:21.834541 | 2021-01-06T09:24:20 | 2021-01-06T09:24:20 | 327,260,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,937 | py | # Merge and convert real-time results
# Optionally, visualize the output
# This script does not need to run in real-time
import argparse
from os import scandir
from os.path import join, isfile
from tqdm import tqdm
import numpy as np
from PIL import Image, ImageFont, ImageDraw
import sys; sys.path.insert(0, '..'); sys.path.insert(0, '.')
from util import mkdir2
from vis.make_videos_numbered import worker_func as make_video
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dir-A', type=str, default=None)
parser.add_argument('--dir-B', type=str, default=None)
parser.add_argument('--horizontal', action='store_true', default=False)
parser.add_argument('--split-pos', type=float, default=0.5)
parser.add_argument('--split-animation', type=str, default=None)
parser.add_argument('--fps', type=float, default=30)
parser.add_argument('--out-dir', type=str, required=True)
parser.add_argument('--vis-scale', type=float, default=1)
parser.add_argument('--seq', type=str, default=None)
parser.add_argument('--make-video', action='store_true', default=False)
parser.add_argument('--overwrite', action='store_true', default=False)
opts = parser.parse_args()
return opts
# Smoothing functions
# map time from 0-1 to progress from 0-1
def ease_in_out(t):
return -np.cos(np.pi*t)/2 + 0.5
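# Sanity-check values of the easing curve above: ease_in_out(0.0) == 0.0,
# ease_in_out(0.5) == 0.5, ease_in_out(1.0) == 1.0, with the slope flattening
# toward both endpoints (slow start, slow stop).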
# animations
def split_anime_swing(t, split_pos, l, line_width):
# timing information in seconds
durations = [4, 1, 3, 2, 3, 1]
small_end = -line_width//2 - 1
big_end = l + line_width//2
k = 0
last_key = 0
if t < durations[k]:
return split_pos
last_key += durations[k]
k += 1
if t < last_key + durations[k]:
start_pos = split_pos
end_pos = big_end
p = ease_in_out((t - last_key)/durations[k])
return start_pos + p*(end_pos - start_pos)
last_key += durations[k]
k += 1
if t < last_key + durations[k]:
return big_end
last_key += durations[k]
k += 1
if t < last_key + durations[k]:
start_pos = big_end
end_pos = small_end
p = ease_in_out((t - last_key)/durations[k])
return start_pos + p*(end_pos - start_pos)
last_key += durations[k]
k += 1
if t < last_key + durations[k]:
return small_end
last_key += durations[k]
k += 1
if t < last_key + durations[k]:
start_pos = small_end
end_pos = split_pos
p = ease_in_out((t - last_key)/durations[k])
return start_pos + p*(end_pos - start_pos)
return split_pos
def main():
opts = parse_args()
seqs = sorted([item.name for item in scandir(opts.dir_A) if item.is_dir()])
if opts.seq is not None:
if opts.seq.isdigit():
idx = int(opts.seq)
else:
idx = seqs.index(opts.seq)
seqs = [seqs[idx]]
line_width = 15
line_color = [241, 159, 93]
line_color = np.array(line_color, dtype=np.uint8).reshape((1, 1, 3))
# font_path = r'C:\Windows\Fonts\Rock.otf'
# font_path = r'C:\Windows\Fonts\AdobeGurmukhi-Regular.otf'
# font = ImageFont.truetype(font_path, size=40)
for s, seq in enumerate(seqs):
print(f'Processing {s + 1}/{len(seqs)}: {seq}')
seq_dir_A = join(opts.dir_A, seq)
seq_dir_B = join(opts.dir_B, seq)
seq_dir_out = mkdir2(join(opts.out_dir, seq))
frame_list = [item.name for item in scandir(seq_dir_A) if item.is_file() and item.name.endswith('.jpg')]
frame_list = sorted(frame_list)
# frame_list = frame_list[:330]
for ii, frame in enumerate(tqdm(frame_list)):
out_path = join(seq_dir_out, frame)
if not opts.overwrite and isfile(out_path):
continue
img_A = Image.open(join(seq_dir_A, frame))
img_B = Image.open(join(seq_dir_B, frame))
w, h = img_A.size
l = h if opts.horizontal else w
split_pos = opts.split_pos if opts.split_pos > 1 else l*opts.split_pos
if opts.split_animation:
split_pos = globals()['split_anime_' + opts.split_animation](
ii/opts.fps, split_pos, l, line_width,
)
split_pos = int(round(split_pos))
line_start = split_pos - (line_width - 1)//2
line_end = split_pos + line_width//2 # inclusive
# using TrueType supported in PIL
# draw = ImageDraw.Draw(img)
# draw.text(
# (lt[0], lt[1] - font.size),
# text, (*color, 1), # RGBA
# font=font,
# )
if split_pos <= 0:
img = np.array(img_B)
else:
img = np.array(img_A)
img_B = np.asarray(img_B)
if opts.horizontal:
img[split_pos:] = img_B[split_pos:]
else:
img[:, split_pos:] = img_B[:, split_pos:]
if line_start < l and line_end >= 0:
# line is visible
line_start = max(0, line_start)
line_end = min(l, line_end)
if opts.horizontal:
img[line_start:line_end, :] = line_color
else:
img[:, line_start:line_end] = line_color
Image.fromarray(img).save(out_path)
if opts.make_video:
out_path = seq_dir_out + '.mp4'
if opts.overwrite or not isfile(out_path):
print('Making the video')
make_video((seq_dir_out, opts))
else:
print(f'python vis/make_videos_numbered.py "{opts.out_dir}" --fps {opts.fps}')
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
65f48d5c7bdf634c88c47b5df4e34a43fb7061f4 | dfaa71f8064d3d0773941cf14ab86ff57ff67284 | /part35/blog/models.py | e096fa50faab3cc7fd3ab30abcc1c60a66a4db46 | [
"Apache-2.0"
] | permissive | yllew36/WellyGI | e94c5000ff3a7f2fd7316d22ad166fbf7916ea23 | 7d53fac4c81bb994f61b22761e5ac7e48994ade4 | refs/heads/master | 2020-09-05T15:49:37.386078 | 2019-11-15T08:16:59 | 2019-11-15T08:16:59 | 220,148,061 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from django.db import models
# Create your models here.
class PostModel(models.Model):
judul = models.CharField(max_length=20)
body = models.TextField()
category = models.CharField(max_length=20)
published = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return "{}. {}" .format(self.id,self.judul) | [
"[email protected]"
] | |
059945b503bc8e858ff2111eb171506610714b02 | 8e07b5b7a8dd38e0ef2c7ffc97d0392d886f32e6 | /venv/Lib/site-packages/mypy/typeshed/third_party/2and3/markdown/extensions/legacy_attrs.pyi | 035b589a9f34a9529d88c4a202238e44166669ca | [] | no_license | RodrigoNeto/cursopythonyt | fc064a2e6106324e22a23c54bdb9c31040ac9eb6 | 279dad531e21a9c7121b73d84fcbdd714f435e7e | refs/heads/master | 2023-07-03T00:54:09.795054 | 2021-08-13T12:42:24 | 2021-08-13T12:42:24 | 395,646,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | pyi | from typing import Any, Pattern
from markdown.extensions import Extension
from markdown.treeprocessors import Treeprocessor
ATTR_RE: Pattern
class LegacyAttrs(Treeprocessor):
def handleAttributes(self, el, txt): ...
class LegacyAttrExtension(Extension): ...
def makeExtension(**kwargs): ...
| [
"[email protected]"
] | |
5fc3082edcd76e319526f196ca4f0ac1f08de570 | d10d6d037ad741e2383eb1bb8bbc5dd3b3f09e41 | /python3/practice/classic_puzzle/easy/pirates-treasure.py | b6637587260c3ffcd0f4ba3521b9a43ac09effcb | [] | no_license | jz4o/codingames | 4614d34a3d9be07205747ee3617479e1e77fd6ed | 1f2222d23aaf670c7ddb658e7aca1a8e12e9bcab | refs/heads/master | 2023-08-18T03:30:04.640290 | 2023-08-17T14:24:04 | 2023-08-17T14:24:04 | 102,266,024 | 6 | 2 | null | 2021-05-01T13:33:34 | 2017-09-03T13:20:10 | Java | UTF-8 | Python | false | false | 1,193 | py | # import sys
# import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
w = int(input())
h = int(input())
grid = []
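# Pad the map with a sentinel border of 1s so every inner cell can be checked
# against all 8 neighbours without any index bounds handling.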
grid.append([1] * (w + 2))
for i in range(h):
row = []
row.append(1)
for j in input().split():
v = int(j)
row.append(v)
row.append(1)
grid.append(row)
grid.append([1] * (w + 2))
# Write an answer using print
# To debug: print("Debug messages...", file=sys.stderr, flush=True)
def find_pirates_treasure_indexes(grid):
for row_index, row in enumerate(grid[1:-1], start=1):
for column_index, column in enumerate(row[1:-1], start=1):
if 0 in grid[row_index - 1][column_index - 1:column_index + 2]:
continue
if row[column_index - 1] == 0:
continue
if column != 0:
continue
if row[column_index + 1] == 0:
continue
if 0 in grid[row_index + 1][column_index - 1:column_index + 2]:
continue
return f'{column_index - 1} {row_index - 1}'
result = find_pirates_treasure_indexes(grid)
# print("x y")
print(result)
| [
"[email protected]"
] | |
98348901cc25f5b865ce5e0c8986c4f5c88a3439 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_297/ch13_2019_06_06_20_45_46_311950.py | 0b921552630eff594c165545c6ad10f88e51165d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | def calcula_posicao(t,so,v):
s=so+v*t
return s | [
"[email protected]"
] | |
4c04be0c1b62494ac0091aebed2588dc3cae107f | bcc916eb6c5ce649758c903fba6065f6d53dc6d2 | /datalad_webapp/__init__.py | 9ebfad5fa15a49e81b35bb43c19031ae832458dd | [
"MIT"
] | permissive | bpoldrack/datalad-webapp | ed4d432d3a6fc49eff40bfcc296e5426ae3ab3b6 | c6107ba0460b7a967cabe5cecbf2b8e18b3fbf49 | refs/heads/master | 2021-05-25T11:44:53.770310 | 2018-03-29T13:38:53 | 2018-03-29T13:38:53 | 127,297,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,220 | py | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""DataLad webapp support"""
__docformat__ = 'restructuredtext'
import logging
from os.path import dirname
from os.path import basename
from os.path import isdir
from os.path import join as opj
from glob import glob
from datalad import cfg
from pkg_resources import iter_entry_points
from datalad.dochelpers import exc_str
from datalad.utils import assure_list
from datalad.interface.base import Interface
from datalad.interface.base import build_doc
from datalad.support.param import Parameter
from datalad.distribution.dataset import datasetmethod
from datalad.interface.utils import eval_results
from datalad.support.constraints import EnsureNone
from datalad.distribution.dataset import EnsureDataset
# defines a datalad command suite
# this symbol must be identified as a setuptools entrypoint
# to be found by datalad
module_suite = (
# description of the command suite, displayed in cmdline help
"Generic web app support",
[('datalad_webapp', 'WebApp', 'webapp', 'webapp')]
)
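# Illustrative sketch of how a suite like this is typically exposed via
# setuptools so datalad can discover it. The entry-point group and package
# metadata below are assumptions for illustration, not copied from this
# project's setup.py:
#
#     setup(
#         name="datalad_webapp",
#         entry_points={
#             "datalad.extensions": [
#                 "webapp=datalad_webapp:module_suite",
#             ],
#         },
#     )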
# we want to hook into datalad's logging infrastructure, so we use a common
# prefix
lgr = logging.getLogger('datalad.module.webapp')
@build_doc
class WebApp(Interface):
"""
"""
_params_ = dict(
app=Parameter(
args=('--app',),
doc="yeah!",
nargs='+',
action='append'),
dataset=Parameter(
args=("-d", "--dataset"),
doc="""specify the dataset to serve as the anchor of the webapp.
An attempt is made to identify the dataset based on the current
working directory. If a dataset is given, the command will be
executed in the root directory of this dataset.""",
constraints=EnsureDataset() | EnsureNone()),
daemonize=Parameter(
args=("--daemonize",),
action='store_true',
doc="yeah!"),
)
@staticmethod
@datasetmethod(name='webapp')
@eval_results
def __call__(app, dataset=None, daemonize=False):
apps = assure_list(app)
if not apps:
raise ValueError('no app specification given')
if not isinstance(apps[0], (list, tuple)):
apps = [apps]
apps = {a[0] if isinstance(a, (list, tuple)) else a:
a[1] if isinstance(a, (list, tuple)) and len(a) > 1 else None
for a in apps}
import cherrypy
# global config
cherrypy.config.update({
# prevent visible tracebacks, etc:
# http://docs.cherrypy.org/en/latest/config.html#id14
#'environment': 'production',
#'log.error_file': 'site.log',
})
# set the priority according to your needs if you are hooking something
# else on the 'before_finalize' hook point.
@cherrypy.tools.register('before_finalize', priority=60)
def secureheaders():
headers = cherrypy.response.headers
headers['X-Frame-Options'] = 'DENY'
headers['X-XSS-Protection'] = '1; mode=block'
headers['Content-Security-Policy'] = "default-src='self'"
# only add Strict-Transport headers if we're actually using SSL; see the ietf spec
# "An HSTS Host MUST NOT include the STS header field in HTTP responses
# conveyed over non-secure transport"
# http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec-14#section-7.2
if (cherrypy.server.ssl_certificate != None and
cherrypy.server.ssl_private_key != None):
headers['Strict-Transport-Security'] = 'max-age=31536000' # one year
if daemonize:
from cherrypy.process.plugins import Daemonizer
Daemonizer(cherrypy.engine).subscribe()
#PIDFile(cherrypy.engine, '/var/run/myapp.pid').subscribe()
# when running on a priviledged port
#DropPrivileges(cherrypy.engine, uid=1000, gid=1000).subscribe()
enabled_apps = []
for ep in iter_entry_points('datalad.webapps'):
if ep.name not in apps:
continue
mount = apps[ep.name] if apps[ep.name] else '/'
# get the webapp class
cls = ep.load()
# fire up the webapp instance
inst = cls(**dict(dataset=dataset))
# mount under global URL tree (default or given suburl)
app = cherrypy.tree.mount(
root=inst,
script_name=mount,
# app config file, it is ok for that file to not exist
config=cls._webapp_config
)
# forcefully impose more secure mode
# TODO might need one (or more) switch(es) to turn things off for
# particular scenarios
enabled_apps.append(ep.name)
app.merge({
'/': {
# turns all security headers on
'tools.secureheaders.on': True,
'tools.sessions.secure': True,
'tools.sessions.httponly': True}})
static_dir = opj(cls._webapp_dir, cls._webapp_staticdir)
if isdir(static_dir):
app.merge({
# the key has to be / even when an app is mount somewhere
# below
'/': {
'tools.staticdir.on': True,
'tools.staticdir.root': cls._webapp_dir,
'tools.staticdir.dir': cls._webapp_staticdir}}
)
failed_apps = set(apps).difference(enabled_apps)
if failed_apps:
lgr.warning('Failed to load webapps: %s', failed_apps)
if not enabled_apps:
return
cherrypy.engine.start()
cherrypy.engine.block()
yield {}
| [
"[email protected]"
] | |
670cd19658bb9118b6186e4645afd044289372ac | 82042141439ae004fc38bb2ef6238f36ec6bb050 | /attendance/settings.py | 03f8fb508ead9f5f52ebcd48f3f82c239a48a82e | [] | no_license | psteichen/clusil-intranet | 2e9a2cf3b00692a4ef441ebf669af4e63945e9a2 | 5c028d33f6a8559af57a4eeb02fc0f612cb1b261 | refs/heads/master | 2021-07-13T15:40:06.464105 | 2020-06-30T19:51:00 | 2020-06-30T19:51:00 | 27,195,950 | 2 | 1 | null | 2021-06-10T20:06:47 | 2014-11-26T20:59:46 | Python | UTF-8 | Python | false | false | 1,458 | py | # Application settings for attendance app
# coding=utf-8
ACTIONS = {
'invite': (
{
      'label' : u'Invite (a) colleague(s).',
'grade' : 'info',
'icon' : 'plus',
'url' : '/meetings/invite/',
},
),
}
ATTENDANCE_TMPL_CONTENT = {
'template' : 'done.html',
'too_late' : u'Sorry, it is <strong>too late</strong> to confirm/cancel your participation!',
# 'actions' : ACTIONS['invite'],
'actions' : None,
'yes' : u'%(name)s, herewith your <strong>participation</strong> is <strong>confirmed</strong>!',
  'no' : u'%(name)s, thank you for notifying us of your cancellation, you will be <strong>excused</strong>!',
'details' : u'''<p class="lead">
<ul class="lead">
Title: <em>%(title)s</em><br/>
Location: <strong>%(location)s</strong><br/>
Date: <em>%(when)s</em><br/>
Time: %(time)s<br/>
</ul></p>
''',
'event': {
'title' : u'Your participation to the following event: "%(event)s"',
'email' : {
'yes' : u'''
Herewith your participation to "%(event)s" is *confirmed*!''',
'no' : u'''
Thank you for notifying us your cancellation for "%(event)s".
You will be *excused*.''',
},
},
'meeting': {
'title' : u'%(meeting)s meeting',
'email' : {
'yes' : u'''
Herewith your participation to "%(meeting)s" is *confirmed*!''',
'no' : u'''
Thank you for notifying us of your cancellation for "%(meeting)s".
You will be *excused*.''',
},
},
}
| [
"[email protected]"
] | |
c5e49b4de1ac2ec9e848c99af5781b7e842799cf | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/RANet_ID0994_for_PyTorch/dataloader.py | 8d73f52aa9fc6f2e781b3bfbb395740fb1153839 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,745 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import os
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
def get_dataloaders(args):
train_loader, val_loader, test_loader = None, None, None
if args.data == 'cifar10':
normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],
std=[0.2471, 0.2435, 0.2616])
train_set = datasets.CIFAR10(args.data_root, train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]))
val_set = datasets.CIFAR10(args.data_root, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
normalize
]))
elif args.data == 'cifar100':
normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408],
std=[0.2675, 0.2565, 0.2761])
train_set = datasets.CIFAR100(args.data_root, train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]))
val_set = datasets.CIFAR100(args.data_root, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
normalize
]))
else:
# ImageNet
traindir = os.path.join(args.data_root, 'train')
valdir = os.path.join(args.data_root, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_set = datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]))
val_set = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
]))
if args.use_valid:
train_set_index = torch.randperm(len(train_set))
if os.path.exists(os.path.join(args.save, 'index.pth')):
print('!!!!!! Load train_set_index !!!!!!')
train_set_index = torch.load(os.path.join(args.save, 'index.pth'))
else:
print('!!!!!! Save train_set_index !!!!!!')
torch.save(train_set_index, os.path.join(args.save, 'index.pth'))
if args.data.startswith('cifar'):
num_sample_valid = 5000
else:
num_sample_valid = 50000
if 'train' in args.splits:
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
train_set_index[:-num_sample_valid]),
num_workers=args.workers, pin_memory=False)
if 'val' in args.splits:
val_loader = torch.utils.data.DataLoader(
train_set, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
train_set_index[-num_sample_valid:]),
num_workers=args.workers, pin_memory=False)
if 'test' in args.splits:
test_loader = torch.utils.data.DataLoader(
val_set,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False)
else:
if 'train' in args.splits:
train_loader = torch.utils.data.DataLoader(
train_set,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=False)
        if 'val' in args.splits or 'test' in args.splits:
val_loader = torch.utils.data.DataLoader(
val_set,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False)
test_loader = val_loader
return train_loader, val_loader, test_loader
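# Illustrative usage sketch (not part of the original file). The attribute
# names mirror how `args` is consumed above; the concrete values are
# assumptions for demonstration only:
#
#     from argparse import Namespace
#     args = Namespace(data='cifar10', data_root='./data', save='./save',
#                      batch_size=64, workers=4, use_valid=False,
#                      splits=['train', 'val'])
#     train_loader, val_loader, test_loader = get_dataloaders(args)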
| [
"[email protected]"
] | |
485f03dae69df4b6ada9fb3ae69c80c9a25a63be | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03018/s073874742.py | f981861b7dd86b3250675915160c0c2a28113c5c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | S = input()
rlt = 0
b = ''
tA = 0
A = 0
BC = 0
for s in S:
if s == 'A':
if b == 'A':
tA += 1
elif b == 'B':
rlt += A*BC
tA = 1
A = 0
BC = 0
else:
rlt += A*BC
tA = A + 1
A = 0
BC = 0
elif s == 'B':
if b == 'B':
rlt += A*BC
tA = 0
A = 0
BC = 0
elif s == 'C':
if b == 'B':
A += tA
tA = 0
BC += 1
else:
rlt += A*BC
tA = 0
A = 0
BC = 0
b = s
rlt += A*BC
print(rlt) | [
"[email protected]"
] | |
fd1b448e1f6e22bb748d5cd5bbcdcab1faedf382 | ce7fe0c3c46b0cc3dbd8836f007464e320eefda6 | /main.py | f4f8306490673dc070f7b3ed8ff33c15d92215e3 | [] | no_license | happyday521/vunet | 2da3013399ec5660544d2b69b61486e8e1033bb7 | f0d6ef73b02e6099d3b5e4f37e4ddaaa5f8437e6 | refs/heads/master | 2021-09-24T15:12:07.310072 | 2018-04-13T14:48:40 | 2018-04-13T14:48:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,013 | py | import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = False
session = tf.Session(config = config)
import os, logging, shutil, datetime
import glob
import argparse
import yaml
import numpy as np
from tqdm import tqdm, trange
import nn
import models
from batches import get_batches, plot_batch, postprocess, n_boxes
import deeploss
def init_logging(out_base_dir):
# get unique output directory based on current time
os.makedirs(out_base_dir, exist_ok = True)
now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
out_dir = os.path.join(out_base_dir, now)
os.makedirs(out_dir, exist_ok = False)
# copy source code to logging dir to have an idea what the run was about
this_file = os.path.realpath(__file__)
assert(this_file.endswith(".py"))
shutil.copy(this_file, out_dir)
# copy all py files to logging dir
src_dir = os.path.dirname(this_file)
py_files = glob.glob(os.path.join(src_dir, "*.py"))
for py_file in py_files:
shutil.copy(py_file, out_dir)
# init logging
logging.basicConfig(filename = os.path.join(out_dir, 'log.txt'))
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
return out_dir, logger
class Model(object):
def __init__(self, config, out_dir, logger):
self.config = config
self.batch_size = config["batch_size"]
self.img_shape = 2*[config["spatial_size"]] + [3]
self.bottleneck_factor = config["bottleneck_factor"]
self.box_factor = config["box_factor"]
self.imgn_shape = 2*[config["spatial_size"]//(2**self.box_factor)] + [n_boxes*3]
self.init_batches = config["init_batches"]
self.initial_lr = config["lr"]
self.lr_decay_begin = config["lr_decay_begin"]
self.lr_decay_end = config["lr_decay_end"]
self.out_dir = out_dir
self.logger = logger
self.log_frequency = config["log_freq"]
self.ckpt_frequency = config["ckpt_freq"]
self.test_frequency = config["test_freq"]
self.checkpoint_best = False
self.dropout_p = config["drop_prob"]
self.best_loss = float("inf")
self.checkpoint_dir = os.path.join(self.out_dir, "checkpoints")
os.makedirs(self.checkpoint_dir, exist_ok = True)
self.define_models()
self.define_graph()
def define_models(self):
n_latent_scales = 2
n_scales = 1 + int(np.round(np.log2(self.img_shape[0]))) - self.bottleneck_factor
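        # e.g. with an assumed config of spatial_size=256 and bottleneck_factor=2:
        # n_scales = 1 + log2(256) - 2 = 1 + 8 - 2 = 7 resolution scales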
n_filters = 32
self.enc_up_pass = models.make_model(
"enc_up", models.enc_up,
n_scales = n_scales - self.box_factor,
n_filters = n_filters*2**self.box_factor)
self.enc_down_pass = models.make_model(
"enc_down", models.enc_down,
n_scales = n_scales - self.box_factor,
n_latent_scales = n_latent_scales)
self.dec_up_pass = models.make_model(
"dec_up", models.dec_up,
n_scales = n_scales,
n_filters = n_filters)
self.dec_down_pass = models.make_model(
"dec_down", models.dec_down,
n_scales = n_scales,
n_latent_scales = n_latent_scales)
self.dec_params = models.make_model(
"dec_params", models.dec_parameters)
def train_forward_pass(self, x, c, xn, cn, dropout_p, init = False):
kwargs = {"init": init, "dropout_p": dropout_p}
# encoder
hs = self.enc_up_pass(xn, cn, **kwargs)
es, qs, zs_posterior = self.enc_down_pass(hs, **kwargs)
# decoder
gs = self.dec_up_pass(c, **kwargs)
ds, ps, zs_prior = self.dec_down_pass(gs, zs_posterior, training = True, **kwargs)
params = self.dec_params(ds[-1], **kwargs)
activations = hs + es + gs + ds
return params, qs, ps, activations
def test_forward_pass(self, c):
kwargs = {"init": False, "dropout_p": 0.0}
# decoder
gs = self.dec_up_pass(c, **kwargs)
ds, ps, zs_prior = self.dec_down_pass(gs, [], training = False, **kwargs)
params = self.dec_params(ds[-1], **kwargs)
return params
def transfer_pass(self, infer_x, infer_c, generate_c):
kwargs = {"init": False, "dropout_p": 0.0}
# infer latent code
hs = self.enc_up_pass(infer_x, infer_c, **kwargs)
es, qs, zs_posterior = self.enc_down_pass(hs, **kwargs)
zs_mean = list(qs)
# generate from inferred latent code and conditioning
gs = self.dec_up_pass(generate_c, **kwargs)
use_mean = True
if use_mean:
ds, ps, zs_prior = self.dec_down_pass(gs, zs_mean, training = True, **kwargs)
else:
ds, ps, zs_prior = self.dec_down_pass(gs, zs_posterior, training = True, **kwargs)
params = self.dec_params(ds[-1], **kwargs)
return params
def sample(self, params, **kwargs):
return params
def likelihood_loss(self, x, params):
return 5.0*self.vgg19.make_loss_op(x, params)
def define_graph(self):
# pretrained net for perceptual loss
self.vgg19 = deeploss.VGG19Features(session,
feature_layers = self.config["feature_layers"],
feature_weights = self.config["feature_weights"],
gram_weights = self.config["gram_weights"])
global_step = tf.Variable(0, trainable = False, name = "global_step")
lr = nn.make_linear_var(
global_step,
self.lr_decay_begin, self.lr_decay_end,
self.initial_lr, 0.0,
0.0, self.initial_lr)
kl_weight = nn.make_linear_var(
global_step,
self.lr_decay_end // 2, 3 * self.lr_decay_end // 4,
1e-6, 1.0,
1e-6, 1.0)
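        # KL annealing: assuming make_linear_var is a clamped linear ramp, the
        # weight stays near 1e-6 until lr_decay_end/2, then rises linearly to
        # 1.0 at 3*lr_decay_end/4 (e.g. lr_decay_end=100000: flat until step
        # 50000, reaching full weight at step 75000)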
# initialization
self.x_init = tf.placeholder(
tf.float32,
shape = [self.init_batches * self.batch_size] + self.img_shape)
self.c_init = tf.placeholder(
tf.float32,
shape = [self.init_batches * self.batch_size] + self.img_shape)
self.xn_init = tf.placeholder(
tf.float32,
shape = [self.init_batches * self.batch_size] + self.imgn_shape)
self.cn_init = tf.placeholder(
tf.float32,
shape = [self.init_batches * self.batch_size] + self.imgn_shape)
_ = self.train_forward_pass(
self.x_init, self.c_init,
self.xn_init, self.cn_init,
dropout_p = self.dropout_p, init = True)
# training
self.x = tf.placeholder(
tf.float32,
shape = [self.batch_size] + self.img_shape)
self.c = tf.placeholder(
tf.float32,
shape = [self.batch_size] + self.img_shape)
self.xn = tf.placeholder(
tf.float32,
shape = [self.batch_size] + self.imgn_shape)
self.cn = tf.placeholder(
tf.float32,
shape = [self.batch_size] + self.imgn_shape)
# compute parameters of model distribution
params, qs, ps, activations = self.train_forward_pass(
self.x, self.c,
self.xn, self.cn,
dropout_p = self.dropout_p)
# sample from model distribution
sample = self.sample(params)
# maximize likelihood
likelihood_loss = self.likelihood_loss(self.x, params)
kl_loss = tf.to_float(0.0)
for q, p in zip(qs, ps):
self.logger.info("Latent shape: {}".format(q.shape.as_list()))
kl_loss += models.latent_kl(q, p)
loss = likelihood_loss + kl_weight * kl_loss
# testing
test_forward = self.test_forward_pass(self.c)
test_sample = self.sample(test_forward)
# reconstruction
reconstruction_params, _, _, _ = self.train_forward_pass(
self.x, self.c,
self.xn, self.cn,
dropout_p = 0.0)
self.reconstruction = self.sample(reconstruction_params)
# optimization
self.trainable_variables = [v for v in tf.trainable_variables()
if not v in self.vgg19.variables]
optimizer = tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5, beta2 = 0.9)
opt_op = optimizer.minimize(loss, var_list = self.trainable_variables)
with tf.control_dependencies([opt_op]):
self.train_op = tf.assign(global_step, global_step + 1)
# logging and visualization
self.log_ops = dict()
self.log_ops["global_step"] = global_step
self.log_ops["likelihood_loss"] = likelihood_loss
self.log_ops["kl_loss"] = kl_loss
self.log_ops["kl_weight"] = kl_weight
self.log_ops["loss"] = loss
self.img_ops = dict()
self.img_ops["sample"] = sample
self.img_ops["test_sample"] = test_sample
self.img_ops["x"] = self.x
self.img_ops["c"] = self.c
for i, l in enumerate(self.vgg19.losses):
self.log_ops["vgg_loss_{}".format(i)] = l
        # keep separate train and validation summaries
# only training summary contains histograms
train_summaries = list()
for k, v in self.log_ops.items():
train_summaries.append(tf.summary.scalar(k, v))
self.train_summary_op = tf.summary.merge_all()
valid_summaries = list()
for k, v in self.log_ops.items():
valid_summaries.append(tf.summary.scalar(k+"_valid", v))
self.valid_summary_op = tf.summary.merge(valid_summaries)
# all variables for initialization
self.variables = [v for v in tf.global_variables()
if not v in self.vgg19.variables]
self.logger.info("Defined graph")
def init_graph(self, init_batch):
self.writer = tf.summary.FileWriter(
self.out_dir,
session.graph)
self.saver = tf.train.Saver(self.variables)
initializer_op = tf.variables_initializer(self.variables)
session.run(initializer_op, {
self.xn_init: init_batch[2],
self.cn_init: init_batch[3],
self.x_init: init_batch[0],
self.c_init: init_batch[1]})
self.logger.info("Initialized model from scratch")
def restore_graph(self, restore_path):
self.writer = tf.summary.FileWriter(
self.out_dir,
session.graph)
self.saver = tf.train.Saver(self.variables)
self.saver.restore(session, restore_path)
self.logger.info("Restored model from {}".format(restore_path))
def reset_global_step(self):
session.run(tf.assign(self.log_ops["global_step"], 0))
self.logger.info("Reset global_step")
def fit(self, batches, valid_batches = None):
start_step = self.log_ops["global_step"].eval(session)
self.valid_batches = valid_batches
for batch in trange(start_step, self.lr_decay_end):
X_batch, C_batch, XN_batch, CN_batch = next(batches)
feed_dict = {
self.xn: XN_batch,
self.cn: CN_batch,
self.x: X_batch,
self.c: C_batch}
fetch_dict = {"train": self.train_op}
if self.log_ops["global_step"].eval(session) % self.log_frequency == 0:
fetch_dict["log"] = self.log_ops
fetch_dict["img"] = self.img_ops
fetch_dict["summary"] = self.train_summary_op
result = session.run(fetch_dict, feed_dict)
self.log_result(result)
def log_result(self, result, **kwargs):
global_step = self.log_ops["global_step"].eval(session)
if "summary" in result:
self.writer.add_summary(result["summary"], global_step)
self.writer.flush()
if "log" in result:
for k in sorted(result["log"]):
v = result["log"][k]
self.logger.info("{}: {}".format(k, v))
if "img" in result:
for k, v in result["img"].items():
plot_batch(v, os.path.join(
self.out_dir,
k + "_{:07}.png".format(global_step)))
if self.valid_batches is not None:
# validation run
X_batch, C_batch, XN_batch, CN_batch = next(self.valid_batches)
feed_dict = {
self.xn: XN_batch,
self.cn: CN_batch,
self.x: X_batch,
self.c: C_batch}
fetch_dict = dict()
fetch_dict["imgs"] = self.img_ops
fetch_dict["summary"] = self.valid_summary_op
fetch_dict["validation_loss"] = self.log_ops["loss"]
result = session.run(fetch_dict, feed_dict)
self.writer.add_summary(result["summary"], global_step)
self.writer.flush()
# display samples
imgs = result["imgs"]
for k, v in imgs.items():
plot_batch(v, os.path.join(
self.out_dir,
"valid_" + k + "_{:07}.png".format(global_step)))
# log validation loss
validation_loss = result["validation_loss"]
self.logger.info("{}: {}".format("validation_loss", validation_loss))
if self.checkpoint_best and validation_loss < self.best_loss:
# checkpoint if validation loss improved
self.logger.info("step {}: Validation loss improved from {:.4e} to {:.4e}".format(global_step, self.best_loss, validation_loss))
self.best_loss = validation_loss
self.make_checkpoint(global_step, prefix = "best_")
if global_step % self.test_frequency == 0:
if self.valid_batches is not None:
# testing
X_batch, C_batch, XN_batch, CN_batch = next(self.valid_batches)
x_gen = self.test(C_batch)
for k in x_gen:
plot_batch(x_gen[k], os.path.join(
self.out_dir,
"testing_{}_{:07}.png".format(k, global_step)))
# transfer
bs = X_batch.shape[0]
imgs = list()
imgs.append(np.zeros_like(X_batch[0,...]))
for r in range(bs):
imgs.append(C_batch[r,...])
for i in range(bs):
x_infer = XN_batch[i,...]
c_infer = CN_batch[i,...]
imgs.append(X_batch[i,...])
x_infer_batch = x_infer[None,...].repeat(bs, axis = 0)
c_infer_batch = c_infer[None,...].repeat(bs, axis = 0)
c_generate_batch = C_batch
results = model.transfer(x_infer_batch, c_infer_batch, c_generate_batch)
for j in range(bs):
imgs.append(results[j,...])
imgs = np.stack(imgs, axis = 0)
plot_batch(imgs, os.path.join(
out_dir,
"transfer_{:07}.png".format(global_step)))
if global_step % self.ckpt_frequency == 0:
self.make_checkpoint(global_step)
def make_checkpoint(self, global_step, prefix = ""):
fname = os.path.join(self.checkpoint_dir, prefix + "model.ckpt")
self.saver.save(
session,
fname,
global_step = global_step)
self.logger.info("Saved model to {}".format(fname))
def test(self, c_batch):
results = dict()
results["cond"] = c_batch
sample = session.run(self.img_ops["test_sample"],
{self.c: c_batch})
results["test_sample"] = sample
return results
def reconstruct(self, x_batch, c_batch):
return session.run(
self.reconstruction,
{self.x: x_batch, self.c: c_batch})
def transfer(self, x_encode, c_encode, c_decode):
initialized = getattr(self, "_init_transfer", False)
if not initialized:
# transfer
self.c_generator = tf.placeholder(
tf.float32,
shape = [self.batch_size] + self.img_shape)
infer_x = self.xn
infer_c = self.cn
generate_c = self.c_generator
transfer_params = self.transfer_pass(infer_x, infer_c, generate_c)
self.transfer_mean_sample = self.sample(transfer_params)
self._init_transfer = True
return session.run(
self.transfer_mean_sample, {
self.xn: x_encode,
self.cn: c_encode,
self.c_generator: c_decode})
if __name__ == "__main__":
default_log_dir = os.path.join(os.getcwd(), "log")
parser = argparse.ArgumentParser()
parser.add_argument("--config", required = True, help = "path to config")
parser.add_argument("--mode", default = "train",
choices=["train", "test", "add_reconstructions", "transfer"])
parser.add_argument("--log_dir", default = default_log_dir, help = "path to log into")
parser.add_argument("--checkpoint", help = "path to checkpoint to restore")
parser.add_argument("--retrain", dest = "retrain", action = "store_true", help = "reset global_step to zero")
parser.set_defaults(retrain = False)
opt = parser.parse_args()
with open(opt.config) as f:
config = yaml.load(f)
out_dir, logger = init_logging(opt.log_dir)
logger.info(opt)
logger.info(yaml.dump(config))
if opt.mode == "train":
batch_size = config["batch_size"]
img_shape = 2*[config["spatial_size"]] + [3]
data_shape = [batch_size] + img_shape
init_shape = [config["init_batches"] * batch_size] + img_shape
box_factor = config["box_factor"]
data_index = config["data_index"]
batches = get_batches(data_shape, data_index, train = True, box_factor = box_factor)
init_batches = get_batches(init_shape, data_index, train = True, box_factor = box_factor)
valid_batches = get_batches(data_shape, data_index, train = False, box_factor = box_factor)
logger.info("Number of training samples: {}".format(batches.n))
logger.info("Number of validation samples: {}".format(valid_batches.n))
model = Model(config, out_dir, logger)
if opt.checkpoint is not None:
model.restore_graph(opt.checkpoint)
else:
model.init_graph(next(init_batches))
if opt.retrain:
model.reset_global_step()
model.fit(batches, valid_batches)
else:
        raise NotImplementedError()
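# Example invocation (sketch; the script and config filenames are placeholders —
# the YAML must define batch_size, spatial_size, init_batches, box_factor and
# data_index exactly as the train branch above reads them):
#   python train_script.py --config configs/model.yaml --mode train --log_dir log/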
| [
"[email protected]"
] | |
5771286c57f461941cc57f7ef650d4dca16a9e7e | cc14a65db9243584879726349af66297f7363298 | /docs/conf.py | 8ac01a18279ac15494881932a56749457a85e4b1 | [
"MIT"
] | permissive | AyraHikari/telegram-upload | bed549b358a457870a112d69d775d2bb8d251099 | b29ba50ba0df03e9991eccd64e6a42a6ca69c73f | refs/heads/master | 2020-04-29T00:31:40.889784 | 2019-03-08T19:49:19 | 2019-03-08T19:49:19 | 175,696,591 | 1 | 1 | MIT | 2019-03-14T20:44:22 | 2019-03-14T20:44:22 | null | UTF-8 | Python | false | false | 9,070 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# telegram_upload documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
__dir__ = os.path.dirname(__file__)
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import telegram_upload
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'telegram-upload'
copyright = u"2018, Nekmo"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = telegram_upload.__version__
# The full version, including alpha/beta/rc tags.
release = telegram_upload.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo': 'logo.png',
    'description': 'Upload files to Telegram using your account',
'github_user': 'Nekmo',
'github_repo': 'telegram_upload',
'github_type': 'star',
'github_banner': True,
'travis_button': True,
'codecov_button': True,
'analytics_id': 'UA-62276079-1',
'canonical_url': 'http://docs.nekmo.org/telegram-upload/'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'telegram_uploaddoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'telegram_upload.tex',
u'telegram-upload Documentation',
u'Nekmo', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'telegram_upload',
u'telegram-upload Documentation',
[u'Nekmo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'telegram_upload',
u'telegram-upload Documentation',
u'Nekmo',
'telegram_upload',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def setup(app):
app.add_stylesheet('_static/custom.css')
| [
"[email protected]"
] | |
5086248faf63027b66d09017218bbf3019c4625f | 881a76acaf0b6a26fd34548f0b1abbcf176a37b2 | /ezgal/__init__.py | 70fdcaa0ab7a97b7b8f4f8afe7ec51d6a94d571a | [
"MIT"
] | permissive | gsnyder206/ezgal | 930ed644b00332e3fc1f733a32afc72d511b7cb0 | f10e57021ca88c7139a28b058b8716c5507ce48c | refs/heads/master | 2021-01-11T11:17:09.551803 | 2017-01-11T16:44:50 | 2017-01-11T16:44:50 | 78,657,426 | 0 | 0 | null | 2017-01-11T16:28:50 | 2017-01-11T16:28:50 | null | UTF-8 | Python | false | false | 1,767 | py | import ezgal,utils,astro_filter,ezgal_light,wrapper,sfhs,weight,dusts
__all__ = ["model", "utils", "wrapper", "sfhs", "weight"]
__author__ = 'Conor Mancone, Anthony Gonzalez'
__email__ = '[email protected]'
__ver__ = '2.0'
ezgal = ezgal.ezgal
model = ezgal
astro_filter = astro_filter.astro_filter
ezgal_light = ezgal_light.ezgal_light
wrapper = wrapper.wrapper
weight = weight.weight
def interpolate( values, xs, models=None, key=None, return_wrapper=False ):
""" models = ezgal.interpolate( values, xs, models, return_wrapper=False )
or
models = ezgal.interpolate( values, models, key=meta_key, return_wrapper=False )
Interpolate between EzGal models and return new models.
`models` is a list of EzGal model objects or filenames of EzGal compatible files.
`xs` is the values of the models to be interpolated between and `values` is a list
of values for the new models to be interpolated at.
Alternatively you can ignore xs and specify the name of a meta key
to use to build the interpolation grid.
Returns a list of EzGal model objects or a single EzGal model if a scalar is passed
for `values`. Alternatively, set return_wrapper=True and it will return an ezgal wrapper
object containing the fitted models objects.
All model SEDs must have the same age/wavelength grid. """
# what calling sequence was used?
if models is None and key is not None:
return wrapper( xs ).interpolate( key, values, return_wrapper=return_wrapper )
# make sure we have everything we need...
    if len( models ) != len( xs ): raise ValueError( 'xs list has a different length than models list!' )
# return interpolated models
return wrapper( models, extra_data=xs, extra_name='interp' ).interpolate( 'interp', values, return_wrapper=return_wrapper ) | [
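# Minimal usage sketch (hypothetical model filenames; any EzGal-compatible SED
# files sharing a single age/wavelength grid would work — note len(xs) must
# equal len(models), per the check above):
#   import ezgal
#   new_models = ezgal.interpolate( [0.015], [0.008, 0.02, 0.05],
#       models=['m_z0.008.model', 'm_z0.02.model', 'm_z0.05.model'] )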
"[email protected]"
] | |
2929024e5b2b882ef9940eb706da9e2ad2f7e2c7 | 482297526ed7eedc7c3600a4b98c45e775065852 | /emsapi/models/adi_ems_web_shared_tableau_rest_view_py3.py | 9815d11e7c037465cd2c29f30ceddec172074ad0 | [
"MIT"
] | permissive | ge-flight-analytics/emsapi-python | 9c441095c0f9c9dc8a42ee918f830a5261f4f4d1 | d3de16397c6f3a35c0965e4dfc76741d1379145e | refs/heads/master | 2023-08-03T20:47:45.950550 | 2023-07-26T16:13:01 | 2023-07-26T16:13:01 | 233,153,982 | 0 | 1 | MIT | 2023-07-26T16:13:31 | 2020-01-11T00:38:20 | Python | UTF-8 | Python | false | false | 1,008 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AdiEmsWebSharedTableauRestView(Model):
"""AdiEmsWebSharedTableauRestView.
:param id:
:type id: str
:param name:
:type name: str
:param content_url:
:type content_url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'content_url': {'key': 'contentUrl', 'type': 'str'},
}
def __init__(self, *, id: str=None, name: str=None, content_url: str=None, **kwargs) -> None:
super(AdiEmsWebSharedTableauRestView, self).__init__(**kwargs)
self.id = id
self.name = name
self.content_url = content_url
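# Construction sketch (arguments are keyword-only per the __init__ signature
# above; the values shown are placeholders):
#   view = AdiEmsWebSharedTableauRestView(id="1", name="Overview",
#                                         content_url="views/overview")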
| [
"[email protected]"
] | |
972e173dd684f111e265003215339e68adb7e6fb | cac36b279aa1ea889a5e3803efd615449c55922b | /src/012_costMinimize.py | 2d75190ae9e5e749cffd703687dcfa87d42aa3ce | [] | no_license | GunSik2/ml | 6e16cbbe04406686e394f59c634ed105bf90f849 | bdc7ad59c113e13eb13d01d05e30ec77cc96035f | refs/heads/master | 2020-06-12T09:53:30.965606 | 2017-01-05T03:37:34 | 2017-01-05T03:37:34 | 75,591,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | import tensorflow as tf
# Single variable linear regression
# Hypothesis: H(x) = Wx + b
# Cost function: cost(W, b) = 1/m * Sum((H(x) - y)^2)
# Gradient descent: W := W - alpha * 1/m * Sum((W*x - y) * x)
# training data
x = [1., 2., 3., 4.]
y = [2., 4., 6., 8.]
# Initial value (w, b)
W = tf.Variable(tf.random_uniform([1], -10000., 10000.))
b = 0
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# Hypothesis
hypothesis = W * X + b
# Cost function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Gradient descent (manual implementation)
W1 = W - tf.mul(0.1, tf.reduce_mean(tf.mul(tf.mul(W, X) - Y, X)))
update = W.assign(W1)
# launch
# before starting, initialize the variables
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# fit the line
for step in range(1000):
sess.run(update, feed_dict={X: x, Y: y})
print(step, sess.run(cost, feed_dict={X: x, Y: y}), sess.run(W))
# learns best fit is w: [2] b: [0]
print(sess.run(hypothesis, feed_dict={X: 5}))
print(sess.run(hypothesis, feed_dict={X: 2.5})) | [
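# For comparison, TensorFlow 1.x's built-in optimizer performs the same update
# as the manual W1/update rule above (sketch; it would replace those two lines):
#   train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
#   sess.run(train_op, feed_dict={X: x, Y: y})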
"[email protected]"
] | |
272780c50836d3f265770a3986bd720199357fd3 | 5337ddfe3adf3a044bae5cdd530e8836b9000db1 | /tests/structshape_test.py | 0766479528b059d1cc38582a4d7d205a6395bf01 | [] | no_license | 404cafe/Swampy | ef1d3206e9fece098910fb41d542ac7195dde50a | 82fdafb27e219d0544b74a745f516bfb2264fdaf | refs/heads/master | 2020-12-01T06:27:08.869741 | 2020-03-04T08:41:59 | 2020-03-04T08:41:59 | 230,575,461 | 0 | 0 | null | 2019-12-28T07:30:18 | 2019-12-28T07:30:17 | null | UTF-8 | Python | false | false | 1,260 | py | """This module is part of Swampy, a suite of programs available from
allendowney.com/swampy.
Copyright 2011 Allen B. Downey
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
import unittest
from swampy.structshape import structshape
class Tests(unittest.TestCase):
def test_lumpy(self):
t = [1,2,3]
self.assertEqual(structshape(t), 'list of 3 int')
t2 = [[1,2], [3,4], [5,6]]
self.assertEqual(structshape(t2), 'list of 3 list of 2 int')
t3 = [1, 2, 3, 4.0, '5', '6', [7], [8], 9]
self.assertEqual(structshape(t3),
'list of (3 int, float, 2 str, 2 list of int, int)')
class Point:
"""trivial object type"""
t4 = [Point(), Point()]
self.assertEqual(structshape(t4), 'list of 2 Point')
s = set('abc')
self.assertEqual(structshape(s), 'set of 3 str')
lt = list(zip(t, s))
self.assertEqual(structshape(lt), 'list of 3 tuple of (int, str)')
d = dict(lt)
self.assertEqual(structshape(d), 'dict of 3 int->str')
it = iter('abc')
self.assertEqual(structshape(it), 'str_iterator of 3 str')
if __name__ == '__main__':
unittest.main()
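# structshape can also be probed interactively, e.g. (sketch, mirroring the
# dict case exercised above):
#   from swampy.structshape import structshape
#   structshape({1: 'a', 2: 'b', 3: 'c'})  # -> 'dict of 3 int->str'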
| [
"[email protected]"
] | |
3c71832b90889a937d8108b84c77854b939df05d | 5dd190725aaaeb7287d935b3c99c20480b208816 | /official/vision/beta/modeling/backbones/resnet_3d.py | 0911b3ea6fadbbc3015c511ed20b32defd858db6 | [
"Apache-2.0",
"MIT"
] | permissive | DemonDamon/mask-detection-based-on-tf2odapi | 32d947164fb54395b9e45368c0d4bcf3a6ea1c28 | 192ae544169c1230c21141c033800aa1bd94e9b6 | refs/heads/main | 2023-05-13T05:05:44.534885 | 2021-06-08T05:56:09 | 2021-06-08T05:56:09 | 369,463,131 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,335 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of 3D Residual Networks."""
from typing import Callable, List, Tuple, Optional
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.beta.modeling.backbones import factory
from official.vision.beta.modeling.layers import nn_blocks_3d
from official.vision.beta.modeling.layers import nn_layers
layers = tf.keras.layers
RESNET_SPECS = {
50: [
('bottleneck3d', 64, 3),
('bottleneck3d', 128, 4),
('bottleneck3d', 256, 6),
('bottleneck3d', 512, 3),
],
101: [
('bottleneck3d', 64, 3),
('bottleneck3d', 128, 4),
('bottleneck3d', 256, 23),
('bottleneck3d', 512, 3),
],
152: [
('bottleneck3d', 64, 3),
('bottleneck3d', 128, 8),
('bottleneck3d', 256, 36),
('bottleneck3d', 512, 3),
],
200: [
('bottleneck3d', 64, 3),
('bottleneck3d', 128, 24),
('bottleneck3d', 256, 36),
('bottleneck3d', 512, 3),
],
270: [
('bottleneck3d', 64, 4),
('bottleneck3d', 128, 29),
('bottleneck3d', 256, 53),
('bottleneck3d', 512, 4),
],
300: [
('bottleneck3d', 64, 4),
('bottleneck3d', 128, 36),
('bottleneck3d', 256, 54),
('bottleneck3d', 512, 4),
],
350: [
('bottleneck3d', 64, 4),
('bottleneck3d', 128, 36),
('bottleneck3d', 256, 72),
('bottleneck3d', 512, 4),
],
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResNet3D(tf.keras.Model):
"""Creates a 3D ResNet family model."""
def __init__(
self,
model_id: int,
temporal_strides: List[int],
temporal_kernel_sizes: List[Tuple[int]],
use_self_gating: List[int] = None,
input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
stem_type: str = 'v0',
stem_conv_temporal_kernel_size: int = 5,
stem_conv_temporal_stride: int = 2,
stem_pool_temporal_stride: int = 2,
init_stochastic_depth_rate: float = 0.0,
activation: str = 'relu',
se_ratio: Optional[float] = None,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a 3D ResNet model.
Args:
model_id: An `int` of depth of ResNet backbone model.
temporal_strides: A list of integers that specifies the temporal strides
for all 3d blocks.
temporal_kernel_sizes: A list of tuples that specifies the temporal kernel
sizes for all 3d blocks in different block groups.
use_self_gating: A list of booleans to specify applying self-gating module
or not in each block group. If None, self-gating is not applied.
input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
stem_type: A `str` of stem type of ResNet. Default to `v0`. If set to
`v1`, use ResNet-D type stem (https://arxiv.org/abs/1812.01187).
stem_conv_temporal_kernel_size: An `int` of temporal kernel size for the
first conv layer.
stem_conv_temporal_stride: An `int` of temporal stride for the first conv
layer.
stem_pool_temporal_stride: An `int` of temporal stride for the first pool
layer.
init_stochastic_depth_rate: A `float` of initial stochastic depth rate.
activation: A `str` of name of the activation function.
se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A str for kernel initializer of convolutional layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
**kwargs: Additional keyword arguments to be passed.
"""
self._model_id = model_id
self._temporal_strides = temporal_strides
self._temporal_kernel_sizes = temporal_kernel_sizes
self._input_specs = input_specs
self._stem_type = stem_type
self._stem_conv_temporal_kernel_size = stem_conv_temporal_kernel_size
self._stem_conv_temporal_stride = stem_conv_temporal_stride
self._stem_pool_temporal_stride = stem_pool_temporal_stride
self._use_self_gating = use_self_gating
self._se_ratio = se_ratio
self._init_stochastic_depth_rate = init_stochastic_depth_rate
self._use_sync_bn = use_sync_bn
self._activation = activation
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
# Build ResNet3D backbone.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
# Build stem.
if stem_type == 'v0':
x = layers.Conv3D(
filters=64,
kernel_size=[stem_conv_temporal_kernel_size, 7, 7],
strides=[stem_conv_temporal_stride, 2, 2],
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(
x)
x = tf_utils.get_activation(activation)(x)
elif stem_type == 'v1':
x = layers.Conv3D(
filters=32,
kernel_size=[stem_conv_temporal_kernel_size, 3, 3],
strides=[stem_conv_temporal_stride, 2, 2],
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(
x)
x = tf_utils.get_activation(activation)(x)
x = layers.Conv3D(
filters=32,
kernel_size=[1, 3, 3],
strides=[1, 1, 1],
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(
x)
x = tf_utils.get_activation(activation)(x)
x = layers.Conv3D(
filters=64,
kernel_size=[1, 3, 3],
strides=[1, 1, 1],
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(
x)
x = tf_utils.get_activation(activation)(x)
else:
raise ValueError(f'Stem type {stem_type} not supported.')
temporal_kernel_size = 1 if stem_pool_temporal_stride == 1 else 3
x = layers.MaxPool3D(
pool_size=[temporal_kernel_size, 3, 3],
strides=[stem_pool_temporal_stride, 2, 2],
padding='same')(
x)
# Build intermediate blocks and endpoints.
resnet_specs = RESNET_SPECS[model_id]
if len(temporal_strides) != len(resnet_specs) or len(
temporal_kernel_sizes) != len(resnet_specs):
raise ValueError(
'Number of blocks in temporal specs should equal to resnet_specs.')
endpoints = {}
for i, resnet_spec in enumerate(resnet_specs):
if resnet_spec[0] == 'bottleneck3d':
block_fn = nn_blocks_3d.BottleneckBlock3D
else:
raise ValueError('Block fn `{}` is not supported.'.format(
resnet_spec[0]))
x = self._block_group(
inputs=x,
filters=resnet_spec[1],
temporal_kernel_sizes=temporal_kernel_sizes[i],
temporal_strides=temporal_strides[i],
spatial_strides=(1 if i == 0 else 2),
block_fn=block_fn,
block_repeats=resnet_spec[2],
stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate(
self._init_stochastic_depth_rate, i + 2, 5),
use_self_gating=use_self_gating[i] if use_self_gating else False,
name='block_group_l{}'.format(i + 2))
endpoints[str(i + 2)] = x
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(ResNet3D, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)
def _block_group(self,
inputs: tf.Tensor,
filters: int,
temporal_kernel_sizes: Tuple[int],
temporal_strides: int,
spatial_strides: int,
block_fn: Callable[
...,
tf.keras.layers.Layer] = nn_blocks_3d.BottleneckBlock3D,
block_repeats: int = 1,
stochastic_depth_drop_rate: float = 0.0,
use_self_gating: bool = False,
name: str = 'block_group'):
"""Creates one group of blocks for the ResNet3D model.
Args:
inputs: A `tf.Tensor` of size `[batch, channels, height, width]`.
filters: An `int` of number of filters for the first convolution of the
layer.
temporal_kernel_sizes: A tuple that specifies the temporal kernel sizes
for each block in the current group.
temporal_strides: An `int` of temporal strides for the first convolution
in this group.
spatial_strides: An `int` stride to use for the first convolution of the
layer. If greater than 1, this layer will downsample the input.
block_fn: Either `nn_blocks.ResidualBlock` or `nn_blocks.BottleneckBlock`.
block_repeats: An `int` of number of blocks contained in the layer.
stochastic_depth_drop_rate: A `float` of drop rate of the current block
group.
use_self_gating: A `bool` that specifies whether to apply self-gating
module or not.
name: A `str` name for the block.
Returns:
The output `tf.Tensor` of the block layer.
"""
if len(temporal_kernel_sizes) != block_repeats:
raise ValueError(
'Number of elements in `temporal_kernel_sizes` must equal to `block_repeats`.'
)
# Only apply self-gating module in the last block.
use_self_gating_list = [False] * (block_repeats - 1) + [use_self_gating]
x = block_fn(
filters=filters,
temporal_kernel_size=temporal_kernel_sizes[0],
temporal_strides=temporal_strides,
spatial_strides=spatial_strides,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
use_self_gating=use_self_gating_list[0],
se_ratio=self._se_ratio,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for i in range(1, block_repeats):
x = block_fn(
filters=filters,
temporal_kernel_size=temporal_kernel_sizes[i],
temporal_strides=1,
spatial_strides=1,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
use_self_gating=use_self_gating_list[i],
se_ratio=self._se_ratio,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return tf.identity(x, name=name)
def get_config(self):
config_dict = {
'model_id': self._model_id,
'temporal_strides': self._temporal_strides,
'temporal_kernel_sizes': self._temporal_kernel_sizes,
'stem_type': self._stem_type,
'stem_conv_temporal_kernel_size': self._stem_conv_temporal_kernel_size,
'stem_conv_temporal_stride': self._stem_conv_temporal_stride,
'stem_pool_temporal_stride': self._stem_pool_temporal_stride,
'use_self_gating': self._use_self_gating,
'se_ratio': self._se_ratio,
'init_stochastic_depth_rate': self._init_stochastic_depth_rate,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('resnet_3d')
def build_resnet3d(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
"""Builds ResNet 3d backbone from a config."""
backbone_cfg = backbone_config.get()
# Flatten configs before passing to the backbone.
temporal_strides = []
temporal_kernel_sizes = []
use_self_gating = []
for block_spec in backbone_cfg.block_specs:
temporal_strides.append(block_spec.temporal_strides)
temporal_kernel_sizes.append(block_spec.temporal_kernel_sizes)
use_self_gating.append(block_spec.use_self_gating)
return ResNet3D(
model_id=backbone_cfg.model_id,
temporal_strides=temporal_strides,
temporal_kernel_sizes=temporal_kernel_sizes,
use_self_gating=use_self_gating,
input_specs=input_specs,
stem_type=backbone_cfg.stem_type,
stem_conv_temporal_kernel_size=backbone_cfg
.stem_conv_temporal_kernel_size,
stem_conv_temporal_stride=backbone_cfg.stem_conv_temporal_stride,
stem_pool_temporal_stride=backbone_cfg.stem_pool_temporal_stride,
init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate,
se_ratio=backbone_cfg.se_ratio,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
@factory.register_backbone_builder('resnet_3d_rs')
def build_resnet3d_rs(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
"""Builds ResNet-3D-RS backbone from a config."""
backbone_cfg = backbone_config.get()
# Flatten configs before passing to the backbone.
temporal_strides = []
temporal_kernel_sizes = []
use_self_gating = []
for i, block_spec in enumerate(backbone_cfg.block_specs):
temporal_strides.append(block_spec.temporal_strides)
use_self_gating.append(block_spec.use_self_gating)
block_repeats_i = RESNET_SPECS[backbone_cfg.model_id][i][-1]
temporal_kernel_sizes.append(list(block_spec.temporal_kernel_sizes) *
block_repeats_i)
return ResNet3D(
model_id=backbone_cfg.model_id,
temporal_strides=temporal_strides,
temporal_kernel_sizes=temporal_kernel_sizes,
use_self_gating=use_self_gating,
input_specs=input_specs,
stem_type=backbone_cfg.stem_type,
stem_conv_temporal_kernel_size=backbone_cfg
.stem_conv_temporal_kernel_size,
stem_conv_temporal_stride=backbone_cfg.stem_conv_temporal_stride,
stem_pool_temporal_stride=backbone_cfg.stem_pool_temporal_stride,
init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate,
se_ratio=backbone_cfg.se_ratio,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
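# Minimal instantiation sketch (illustrative input shape; each tuple in
# temporal_kernel_sizes must match the block_repeats of RESNET_SPECS[50],
# i.e. 3, 4, 6 and 3):
#   backbone = ResNet3D(
#       model_id=50,
#       temporal_strides=[1, 1, 1, 1],
#       temporal_kernel_sizes=[(3,) * 3, (3,) * 4, (3,) * 6, (3,) * 3])
#   endpoints = backbone(tf.keras.Input(shape=(8, 224, 224, 3)))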
| [
"[email protected]"
] | |
1e0051a965e57364e9feb7ed0b8838ad82f4619a | 95978243568bee7c33a2d6c100f42e0c5a15c933 | /coremltools/converters/mil/mil/ops/defs/iOS16/tensor_transformation.py | 473b7c683407b03e7a405cbd4224e62ba2dda7be | [
"BSD-3-Clause"
] | permissive | cclauss/coremltools | 6cc68874c45ce1035b1b59417eacfdce738d725d | 0c63b0aeb63acedce0d39446c19b80cc47d57a7a | refs/heads/master | 2023-05-01T16:56:43.042564 | 2023-04-14T20:16:24 | 2023-04-14T20:16:24 | 106,254,817 | 0 | 1 | null | 2017-10-09T08:11:43 | 2017-10-09T08:11:43 | null | UTF-8 | Python | false | false | 6,920 | py | # Copyright (c) 2022, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.input_type import (InputSpec,
TensorInputType,
TupleInputType)
from coremltools.converters.mil.mil.operation import Operation
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET
from coremltools.converters.mil.mil.types.symbolic import any_symbolic
@register_op(opset_version=_IOS16_TARGET)
class reshape_like(Operation):
"""
Reshape a tensor to an output shape specified by some or all dimensions of a tuple of reference tensors ``ref_tensors``.
Parameters
----------
x: tensor<\*?, T> (Required)
* The input tensor to be reshaped.
ref_tensors: Tuple[tensor<\*?, R>] (Required)
* A tuple of tensors that define the output shape.
begins: Tuple[const<int32>] (Required)
* A tuple of integers specifying the begin index into the shape vector of the corresponding ``ref_tensor``.
ends: Tuple[const<int32>] (Required)
* A tuple of integers specifying the end index into the shape vector of the corresponding ``ref_tensor``.
end_masks: Tuple[const<bool>] (Required)
* If ``True``, select all axes from the begin index until the end of the corresponding ``ref_tensor``, as in
``ref_tensors[i].shape[begins[i]:]``.
Notes
-----
The output shape is computed as follows:
.. sourcecode:: python
output_shape = []
num_of_refs = len(begins)
for i in range(num_of_refs):
if end_masks[i]:
output_shape.append(ref_tensor_i.shape[begins[i]:])
else:
output_shape.append(ref_tensor_i.shape[begins[i]:ends[i]])
output_shape = np.concat(output_shape, axis=0)
The following is an example:
.. sourcecode:: python
ref_tensors=[tensor[2, 3, 4], tensor[1, 5, 6]]
begins=[0, 1]
ends=[2, 0]
end_masks=[False, True]
The output shape would be ``(2, 3, 5, 6)``.
Returns
-------
tensor<\*?, T>
* Same type as input tensor ``x``.
* Output shape is computed by ``ref_tensors``, ``begins``, ``ends``, and ``end_masks``.
Attributes
----------
T: fp16, fp32, i32, bool
R: fp16, fp32, i32, bool
"""
input_spec = InputSpec(
x=TensorInputType(type_domain="T"),
ref_tensors=TupleInputType(),
begins=TupleInputType(),
ends=TupleInputType(),
end_masks=TupleInputType(),
)
type_domains = {
"T": (types.fp16, types.fp32, types.int32, types.bool),
}
def _check_is_const_tuple_with_scalar(self, param, expected_type, param_name):
"""
This utility function checks the param is a Tuple of scalar with expected data type.
"""
for x in param:
if x.dtype != expected_type or x.shape != ():
msg = "In op reshape_like {}, {} must be a Tuple of scalar {}. Got a {} tensor with shape {}.".format(
self.name,
param_name,
expected_type.__type_info__(),
x.dtype.__type_info__(),
x.shape,
)
raise ValueError(msg)
def type_inference(self):
# Validation the inputs
ref_number = len(self.ref_tensors)
if len(self.begins) != ref_number or len(self.ends) != ref_number or len(self.end_masks) != ref_number:
msg = (
"Op reshape_like {}'s ref_tensors, begins, ends and end_masks must have exactly the same length. "
"Got {}, {}, {} and {}."
            ).format(self.name, ref_number, len(self.begins), len(self.ends), len(self.end_masks))
            raise ValueError(msg)
self._check_is_const_tuple_with_scalar(self.begins, types.int32, "begins")
self._check_is_const_tuple_with_scalar(self.ends, types.int32, "ends")
self._check_is_const_tuple_with_scalar(self.end_masks, types.bool, "end_masks")
# Compute the output shape
out_shape = ()
for ref_tensor, begin, end, end_mask in zip(self.ref_tensors, self.begins, self.ends, self.end_masks):
shape = ref_tensor.shape
begin, end, end_mask = begin.val, end.val, end_mask.val
ref_shape = shape[begin:end] if not end_mask else shape[begin:]
out_shape += tuple(ref_shape)
# Output shape must be known at compile time
if any_symbolic(out_shape):
msg = "Output shape of a reshape_like op {} must not be symbolic. Got {}".format(self.name, out_shape)
raise ValueError(msg)
# Output shape must be consistent with the input shape
if not any_symbolic(self.x.shape):
if np.prod(self.x.shape) != np.prod(out_shape):
msg = "At reshape_like op {}, input shape {} not consistent with the output shape {}.".format(
self.name,
self.x.shape,
out_shape
)
raise ValueError(msg)
return types.tensor(self.x.dtype, out_shape)
@register_op(opset_version=_IOS16_TARGET)
class pixel_unshuffle(Operation):
"""
Rearrange elements in a tensor from spatial dimensions into depth (channel).
It is basically the inverse operation of `pixel_shuffle <#coremltools.converters.mil.mil.ops.defs.iOS15.tensor_transformation.pixel_shuffle>`_.
Equivalent to PyTorch's ``PixelUnshuffle``.
Parameters
----------
x: tensor<[n, C, H / f , W / f], T> (Required)
* Input tensor of rank ``4``.
downscale_factor: const<i32>
* Factor to decrease spatial resolution by.
Returns
-------
tensor<[n, C * f^2, H, W], T>
* Where ``f`` is the downscale factor.
Attributes
----------
T: fp16, fp32
References
----------
`torch.nn.PixelUnshuffle <https://pytorch.org/docs/stable/generated/torch.nn.PixelUnshuffle.html>`_
"""
input_spec = InputSpec(
x=TensorInputType(type_domain="T"),
downscale_factor=TensorInputType(const=True, type_domain=types.uint32),
)
type_domains = {
"T": (types.fp16, types.fp32),
}
def type_inference(self):
x_type = self.x.dtype
n, c, h, w = self.x.shape
f = self.downscale_factor.val
ret_shape = (n, c * f * f, h / f, w / f)
return types.tensor(x_type, ret_shape)
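# Exercising pixel_unshuffle through the MIL builder would look roughly like
# this (sketch; the Builder import and opset targeting follow coremltools'
# documented pattern, and the shapes follow the docstring above):
#   import coremltools as ct
#   from coremltools.converters.mil import Builder as mb
#   @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 8, 8))],
#               opset_version=ct.target.iOS16)
#   def prog(x):
#       return mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2))  # -> (1, 12, 4, 4)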
| [
"[email protected]"
] | |
729cda05f7cf671ee84fa46f47321cf0aba2496a | b1c97831338b0c2f8099a56f23bddf394561e0e3 | /Chapter 05/queueJoin.py | 440160eac7fb2079148b0631868f55e20a0c9aca | [
"MIT"
] | permissive | PacktPublishing/Learning-Concurrency-in-Python | 03b242f64a0b6515a41ceccab86936dc54b20e15 | bafc928ce9edc601e3def4441b51555ede13c973 | refs/heads/master | 2023-02-06T00:51:37.297067 | 2023-01-30T08:05:25 | 2023-01-30T08:05:25 | 100,243,064 | 77 | 51 | MIT | 2018-10-26T12:14:45 | 2017-08-14T08:03:03 | Python | UTF-8 | Python | false | false | 530 | py | import threading
import queue
import time
def mySubscriber(queue):
time.sleep(1)
while not queue.empty():
item = queue.get()
if item is None:
break
print("{} removed {} from the queue".format(threading.current_thread(), item))
queue.task_done()
myQueue = queue.Queue()
for i in range(5):
myQueue.put(i)
print("Queue Populated")
thread = threading.Thread(target=mySubscriber, args=(myQueue,))
thread.start()
print("Not Progressing Till Queue is Empty")
myQueue.join()
print("Queue is now empty")
| [
"[email protected]"
] | |
bb8ab784baecc945d7331bba2ea31191ffddd9fb | 10d98fecb882d4c84595364f715f4e8b8309a66f | /f_net/configs/classification.py | 2d1e0d6deb8ebaa8cff5ae3928c884995cda95e1 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 2,352 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for fine-tuning on the GLUE and SuperGLUE benchmarks."""
from f_net.configs import base as base_config
from f_net.configs.base import ModelArchitecture
from f_net.configs.base import TrainingMode
def get_config():
"""Config for fine-tuning (classification)."""
config = base_config.get_config()
# Determines which model to use.
config.model_arch: ModelArchitecture = ModelArchitecture.F_NET
config.mode: TrainingMode = TrainingMode.CLASSIFICATION
# This is either "glue/DS_g", where DS_g is one of the following:
# [cola, sst2, mrpc, qqp, stsb, mnli, qnli, rte, wnli].
config.dataset_name: str = "glue/rte"
# How often to save the model checkpoint.
config.save_checkpoints_steps: int = 200
# Training metrics will be computed (1 / eval_proportion) times during
# training at regularly spaced intervals, regardless of dataset size.
config.eval_proportion: float = 0.05
# Total batch size for training.
config.train_batch_size: int = 64
# Total batch size for eval (and predictions).
config.eval_batch_size: int = 32
# The base learning rate for Adam.
config.learning_rate: float = 1e-5
# Total number of training epochs to perform.
config.num_train_epochs: float = 3
# Proportion of training to perform linear learning rate warmup for.
# E.g., 0.1 = 10% of training steps.
config.warmup_proportion: float = 0.1
# Maximum number of eval steps on validation split. Actual number of step may
# be less for small eval datasets.
config.max_num_eval_steps: int = int(1e5)
# Initial checkpoint directory or filepath (usually from a pre-trained model).
config.init_checkpoint_dir: str = ""
# Dummy attribute for repeated runs.
config.trial: int = 0
return config
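# Typical use from a training binary (sketch; the field overrides shown are
# illustrative, not required):
#   config = get_config()
#   config.dataset_name = "glue/mrpc"
#   config.init_checkpoint_dir = "/path/to/pretrained_checkpoint"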
| [
"[email protected]"
] | |
09d1805c4d1b7e98881a9a7b658b3a20a6a61e32 | 2c07ae4239d217f4a4b3d356ca4be31629dea4d5 | /assets.py | 2c3cc15e2b9d049fc77ed0707642012353ae75fc | [] | no_license | themylogin/thelogin.ru | 771afe0e3afbdc1072695fb2d4920f6ec3b7c6d3 | 1f66ff940dfafe6d065c63e832d51b5e16522edc | refs/heads/master | 2023-04-25T22:02:56.004822 | 2020-03-22T18:02:40 | 2020-03-22T18:02:40 | 7,691,963 | 0 | 0 | null | 2023-04-15T01:17:22 | 2013-01-18T18:31:57 | Python | UTF-8 | Python | false | false | 1,157 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import cssmin
import glob
import jsmin
import os
from config import config
assets = {}
assets_dir = os.path.join(config.path, "asset")
for asset, function, separator in [("css", cssmin.cssmin, ""), ("js", jsmin.jsmin, ";")]:
asset_dir = os.path.join(assets_dir, asset)
    # materialize the filter so the list survives repeated iteration under Python 3
    asset_list = list(filter(lambda filename: filename.endswith("." + asset) and not filename.startswith("packed-"), sorted(os.listdir(asset_dir))))
if config.debug:
assets[asset] = asset_list
else:
asset_time = int(max([os.stat(os.path.join(asset_dir, filename)).st_mtime for filename in asset_list]))
asset_packed = "packed-%d.%s" % (asset_time, asset)
asset_packed_path = os.path.join(asset_dir, asset_packed)
if not os.path.exists(asset_packed_path):
map(os.unlink, glob.glob(os.path.join(asset_dir, "packed-*")))
open(asset_packed_path, "w").write(separator.join([
(function if function else lambda x: x)(open(os.path.join(asset_dir, filename)).read())
for filename in asset_list
]))
assets[asset] = [asset_packed]
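# Downstream templates iterate the computed lists, e.g. (sketch; the
# "/asset/<type>/<name>" URL prefix is an assumption mirroring the directory
# layout above):
#   for f in assets["js"]:
#       print('<script src="/asset/js/%s"></script>' % f)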
| [
"[email protected]"
] | |
a6c00c649c8adf9f2fd19814f9961b8051ffa0c4 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/dccf58efe393b6912faf2e89cf7e87942e28273e-<_connect>-fix.py | 166d99bac0993991bbe9ef5d461007d89ccd7f3a | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | def _connect(self):
if (not HAS_WINRM):
raise AnsibleError(('winrm or requests is not installed: %s' % to_text(WINRM_IMPORT_ERR)))
elif (not HAS_XMLTODICT):
raise AnsibleError(('xmltodict is not installed: %s' % to_text(XMLTODICT_IMPORT_ERR)))
super(Connection, self)._connect()
if (not self.protocol):
self.protocol = self._winrm_connect()
self._connected = True
return self | [
"[email protected]"
] | |
68f4a6ea2404ea0483d10235fd5a32b3766bdc86 | bbe6f37f7347cb83f08846d505ac4aa6bc0031e6 | /purity_fb/purity_fb_1dot9/apis/targets_api.py | 2e7e2860b4552661aeceb860da0277f2a1e17e47 | [
"Apache-2.0"
] | permissive | bsamz-ps/purity_fb_python_client | 02ff7213075cf1948e2db7b0835cc5fcc56f328a | 11f27ef0c72d8aac1fc4e1ed036cca038b85dfa4 | refs/heads/master | 2021-02-19T08:11:04.042758 | 2020-02-12T23:56:08 | 2020-02-12T23:56:08 | 245,294,511 | 0 | 0 | NOASSERTION | 2020-03-06T00:14:27 | 2020-03-06T00:14:26 | null | UTF-8 | Python | false | false | 28,372 | py | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
Pure Storage FlashBlade REST 1.9 Python SDK. Compatible with REST API versions 1.0 - 1.9. Developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class TargetsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_targets(self, target, **kwargs):
"""
Create a new target.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_targets(target, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param TargetPost target: The attribute map used to create the target. (required)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: TargetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_targets_with_http_info(target, **kwargs)
else:
(data) = self.create_targets_with_http_info(target, **kwargs)
return data
def create_targets_with_http_info(self, target, **kwargs):
"""
Create a new target.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_targets_with_http_info(target, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param TargetPost target: The attribute map used to create the target. (required)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: TargetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['target', 'names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_targets" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'target' is set
if ('target' not in params) or (params['target'] is None):
raise ValueError("Missing the required parameter `target` when calling `create_targets`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'target' in params:
body_params = params['target']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/targets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TargetResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
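    # Usage sketch (client setup follows the purity_fb package's usual pattern;
    # host, token and target address are placeholders):
    #   from purity_fb import PurityFb, TargetPost
    #   fb = PurityFb("flashblade.example.com")
    #   fb.login(API_TOKEN)
    #   res = fb.targets.create_targets(names=["remote1"],
    #                                   target=TargetPost(address="10.0.0.2"))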
def delete_targets(self, **kwargs):
"""
Delete a target.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_targets(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_targets_with_http_info(**kwargs)
else:
(data) = self.delete_targets_with_http_info(**kwargs)
return data
def delete_targets_with_http_info(self, **kwargs):
"""
Delete a target.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_targets_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ids', 'names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_targets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/targets', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_targets(self, **kwargs):
"""
List targets.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_targets(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: The filter to be used for query.
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param int limit: limit, should be >= 0
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str sort: The way to order the results.
:param int start: start
:param str token: token
:return: TargetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_targets_with_http_info(**kwargs)
else:
(data) = self.list_targets_with_http_info(**kwargs)
return data
def list_targets_with_http_info(self, **kwargs):
"""
List targets.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_targets_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: The filter to be used for query.
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param int limit: limit, should be >= 0
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str sort: The way to order the results.
:param int start: start
:param str token: token
:return: TargetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filter', 'ids', 'limit', 'names', 'sort', 'start', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_targets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'token' in params:
query_params.append(('token', params['token']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/targets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TargetResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
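    # Illustrative usage sketch (editor's addition, not generated code):
    # a sorted, paginated listing, with `api` as a configured instance:
    #
    #     resp = api.list_targets(limit=10, sort='name')
    #     # iterate resp.items for Target entries; the `items` attribute is
    #     # an assumption based on the TargetResponse naming, not verified.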
def list_targets_performance_replication(self, **kwargs):
"""
List instant or historical target replication performance.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_targets_performance_replication(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int end_time: Time to end sample in milliseconds since epoch.
:param str filter: The filter to be used for query.
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param int limit: limit, should be >= 0
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param int resolution: sample frequency in milliseconds
:param str sort: The way to order the results.
:param int start: start
:param int start_time: Time to start sample in milliseconds since epoch.
:param str token: token
:param bool total_only: Return only the total object.
:param str type: to sample space of either file systems, object store, or all
:return: PerformanceReplicationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_targets_performance_replication_with_http_info(**kwargs)
else:
(data) = self.list_targets_performance_replication_with_http_info(**kwargs)
return data
def list_targets_performance_replication_with_http_info(self, **kwargs):
"""
List instant or historical target replication performance.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_targets_performance_replication_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int end_time: Time to end sample in milliseconds since epoch.
:param str filter: The filter to be used for query.
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param int limit: limit, should be >= 0
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param int resolution: sample frequency in milliseconds
:param str sort: The way to order the results.
:param int start: start
:param int start_time: Time to start sample in milliseconds since epoch.
:param str token: token
:param bool total_only: Return only the total object.
:param str type: to sample space of either file systems, object store, or all
:return: PerformanceReplicationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['end_time', 'filter', 'ids', 'limit', 'names', 'resolution', 'sort', 'start', 'start_time', 'token', 'total_only', 'type']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_targets_performance_replication" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'token' in params:
query_params.append(('token', params['token']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
if 'type' in params:
query_params.append(('type', params['type']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/targets/performance/replication', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PerformanceReplicationResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
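    # Illustrative usage sketch (editor's addition): sampling the last hour
    # of replication performance at 30-second resolution. Epoch-millisecond
    # values are required by start_time/end_time per the docstring above.
    #
    #     import time
    #     now_ms = int(time.time() * 1000)
    #     api.list_targets_performance_replication(
    #         start_time=now_ms - 3600 * 1000,
    #         end_time=now_ms,
    #         resolution=30000)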
def update_targets(self, target, **kwargs):
"""
Update an existing target.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_targets(target, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Target target: The attribute map used to update the target. (required)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: TargetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_targets_with_http_info(target, **kwargs)
else:
(data) = self.update_targets_with_http_info(target, **kwargs)
return data
def update_targets_with_http_info(self, target, **kwargs):
"""
Update an existing target.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_targets_with_http_info(target, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Target target: The attribute map used to update the target. (required)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: TargetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['target', 'ids', 'names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_targets" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'target' is set
if ('target' not in params) or (params['target'] is None):
raise ValueError("Missing the required parameter `target` when calling `update_targets`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'target' in params:
body_params = params['target']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/targets', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TargetResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
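    # Illustrative usage sketch (editor's addition): renaming one target by
    # passing the attribute map plus a `names` selector; the Target model
    # constructor usage shown here is an assumption, not verified against
    # the SDK.
    #
    #     api.update_targets(Target(name='replica-b'), names=['replica-a'])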
| [
"[email protected]"
] | |
42a408a3006e99347d1520d5d60ec745b032b23a | 289e56599c3755e81d642a104b7861a4fab91142 | /pub/entities.py | 4d4df366c77dc0f6cb6db6143db7e20391423b43 | [] | no_license | chaselgrove/cs-pub-portal | 79ebae04b6dc1537cb10a15ddb369fd4636ebd8d | 657e157585ac42b157cd05ed3e281f3f293b6144 | refs/heads/master | 2021-01-17T14:30:49.850801 | 2016-07-28T18:29:22 | 2016-07-28T18:29:22 | 52,994,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,472 | py | from collections import OrderedDict
from . import errors
from .utils import annot_url
from .fields import *
class Entity(object):
"""base class for entities"""
@classmethod
def _get_from_def(cls, pub, id, values):
obj = cls(pub, id)
for (annotation_id, name, value) in values:
obj.annotation_ids.add(annotation_id)
if name in obj.fields:
obj.fields[name].set(value)
else:
err = errors.UnknownFieldError(name)
obj.errors.append(err)
return obj
@classmethod
def _clear_pmid(cls, pmid, cursor):
query = "DELETE FROM %s WHERE publication = %%s" % cls.table
cursor.execute(query, (pmid, ))
return
def __init__(self, pub, id):
self.pub = pub
self.id = id
self.annotation_ids = set()
self.errors = []
self.points = []
self.links = []
self.fields = OrderedDict()
for (key, cls, display_name) in self.field_defs:
self.fields[key] = cls(display_name)
return
def __getitem__(self, key):
return self.fields[key].value
def _insert_annotations(self, cursor):
query = """INSERT INTO entity_annotation (publication,
entity_type,
entity_id,
annotation_id)
VALUES (%s, %s, %s, %s)"""
for annotation_id in self.annotation_ids:
params = (self.pub.pmid,
self.table,
self.id,
annotation_id)
cursor.execute(query, params)
return
def _insert_errors(self, cursor):
query = """INSERT INTO entity_error (publication,
entity_type,
entity_id,
error_type,
data)
VALUES (%s, %s, %s, %s, %s)"""
for error in self.errors:
params = (self.pub.pmid,
self.table,
self.id,
error.__class__.__name__,
error.data)
cursor.execute(query, params)
return
@classmethod
def _get_annotation_ids_from_db(cls, pub, d, cursor):
query = """SELECT entity_id, annotation_id
FROM entity_annotation
WHERE publication = %s
AND entity_type = %s"""
cursor.execute(query, (pub.pmid, cls.table))
for (entity_id, annotation_id) in cursor:
d[entity_id].annotation_ids.add(annotation_id)
return
@classmethod
def _get_errors_from_db(cls, pub, d, cursor):
query = """SELECT entity_id, error_type, data
FROM entity_error
WHERE publication = %s
AND entity_type = %s"""
cursor.execute(query, (pub.pmid, cls.table))
for (entity_id, error_type, data) in cursor:
cls = getattr(errors, error_type)
err = cls(data)
d[entity_id].errors.append(err)
return
def set_related(self):
"""set related entities"""
return
def get_scores(self):
"""obj.get_scores() -> (score, maximum possible score)"""
s = 0
max = 0
for (val, _) in self.points:
if val > 0:
max += val
s += val
return (s, max)
def annotation_links(self):
for annot_id in self.annotation_ids:
url = annot_url(annot_id)
yield '<a href="%s">%s</a>' % (url, annot_id)
return
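# Editor's note (illustrative): a typical lifecycle for the subclasses below
# is: build instances from annotations via _get_from_def(), resolve
# cross-references with set_related(), grade completeness with score(), and
# finally persist with _insert() in the order given by the `entities`
# registry at the bottom of this module.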
class SubjectGroup(Entity):
field_defs = (('diagnosis', Field, 'Diagnosis'),
('nsubjects', Field, 'Subjects'),
('agemean', Field, 'Age mean'),
('agesd', Field, 'Age SD'))
table = 'subject_group'
@classmethod
def _get_from_db(cls, pub, cursor):
d = {}
query = """SELECT *
FROM subject_group
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
cols = [ el[0] for el in cursor.description ]
for row in cursor:
row_dict = dict(zip(cols, row))
obj = SubjectGroup(pub, row_dict['id'])
obj.fields['diagnosis'].set(row_dict['diagnosis'])
obj.fields['nsubjects'].set(row_dict['n_subjects'])
obj.fields['agemean'].set(row_dict['age_mean'])
obj.fields['agesd'].set(row_dict['age_sd'])
d[row_dict['id']] = obj
SubjectGroup._get_annotation_ids_from_db(pub, d, cursor)
SubjectGroup._get_errors_from_db(pub, d, cursor)
return d
def _insert(self, cursor):
query = """INSERT INTO subject_group (publication,
id,
diagnosis,
n_subjects,
age_mean,
age_sd)
VALUES (%s, %s, %s, %s, %s, %s)"""
params = (self.pub.pmid,
self.id,
self['diagnosis'],
self['nsubjects'],
self['agemean'],
self['agesd'])
cursor.execute(query, params)
self._insert_annotations(cursor)
self._insert_errors(cursor)
return
def score(self):
self.points.append((5, 'Existential credit'))
# check for missing fields
for (name, field) in self.fields.iteritems():
if not field.value:
self.points.append((-1, 'Missing %s' % name))
return
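    # Worked example (editor's addition): a group annotated with only
    # 'diagnosis' and 'nsubjects' loses one point for each of the two empty
    # fields, scoring 3 out of a maximum of 5.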
class AcquisitionInstrument(Entity):
field_defs = (('type', Field, 'Type'),
('location', Field, 'Location'),
('field', Field, 'Field'),
('manufacturer', Field, 'Manufacturer'),
('model', Field, 'Model'))
table = 'acquisition_instrument'
@classmethod
def _get_from_db(cls, pub, cursor):
d = {}
query = """SELECT *
FROM acquisition_instrument
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
cols = [ el[0] for el in cursor.description ]
for row in cursor:
row_dict = dict(zip(cols, row))
obj = AcquisitionInstrument(pub, row_dict['id'])
obj.fields['type'].set(row_dict['type'])
obj.fields['location'].set(row_dict['location'])
obj.fields['field'].set(row_dict['field'])
obj.fields['manufacturer'].set(row_dict['manufacturer'])
obj.fields['model'].set(row_dict['model'])
d[row_dict['id']] = obj
AcquisitionInstrument._get_annotation_ids_from_db(pub, d, cursor)
AcquisitionInstrument._get_errors_from_db(pub, d, cursor)
return d
def _insert(self, cursor):
query = """INSERT INTO acquisition_instrument (publication,
id,
type,
location,
field,
manufacturer,
model)
VALUES (%s, %s, %s, %s, %s, %s, %s)"""
params = (self.pub.pmid,
self.id,
self['type'],
self['location'],
self['field'],
self['manufacturer'],
self['model'])
cursor.execute(query, params)
self._insert_annotations(cursor)
self._insert_errors(cursor)
return
def score(self):
self.points.append((7, 'Existential credit'))
# check for missing fields
for (name, field) in self.fields.iteritems():
if not field.value:
if name == 'field':
self.points.append((-2, 'Missing %s' % name))
else:
self.points.append((-1, 'Missing %s' % name))
return
class Acquisition(Entity):
field_defs = (('type', Field, 'Type'),
('acquisitioninstrument', Field, 'Acquisition Instrument'),
('nslices', Field, 'N Slices'),
('prep', Field, 'Prep'),
                  ('tr', Field, 'TR'),
                  ('te', Field, 'TE'),
('ti', Field, 'TI'),
('flipangle', Field, 'Flip Angle'),
('fov', Field, 'FOV'),
('slicethickness', Field, 'Slice Thickness'),
('matrix', Field, 'Matrix'),
('nexcitations', Field, 'N Excitations'))
table = 'acquisition'
@classmethod
def _get_from_db(cls, pub, cursor):
d = {}
query = """SELECT *
FROM acquisition
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
cols = [ el[0] for el in cursor.description ]
for row in cursor:
row_dict = dict(zip(cols, row))
obj = Acquisition(pub, row_dict['id'])
val = row_dict['acquisition_instrument']
obj.fields['acquisitioninstrument'].set(val)
obj.fields['type'].set(row_dict['type'])
obj.fields['nslices'].set(row_dict['n_slice'])
obj.fields['prep'].set(row_dict['prep'])
obj.fields['tr'].set(row_dict['tr'])
obj.fields['te'].set(row_dict['te'])
obj.fields['ti'].set(row_dict['ti'])
obj.fields['flipangle'].set(row_dict['flip_angle'])
obj.fields['fov'].set(row_dict['fov'])
obj.fields['slicethickness'].set(row_dict['slice_thickness'])
obj.fields['matrix'].set(row_dict['matrix'])
obj.fields['nexcitations'].set(row_dict['n_excitations'])
d[row_dict['id']] = obj
Acquisition._get_annotation_ids_from_db(pub, d, cursor)
Acquisition._get_errors_from_db(pub, d, cursor)
return d
def _insert(self, cursor):
query = """INSERT INTO acquisition (publication,
id,
acquisition_instrument,
type,
n_slice,
prep,
tr,
te,
ti,
flip_angle,
fov,
slice_thickness,
matrix,
n_excitations)
VALUES (%s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s)"""
if self.acquisition_instrument:
ai = self.acquisition_instrument.id
else:
ai = None
params = (self.pub.pmid,
self.id,
ai,
self['type'],
self['nslices'],
self['prep'],
self['tr'],
self['te'],
self['ti'],
self['flipangle'],
self['fov'],
self['slicethickness'],
self['matrix'],
self['nexcitations'])
cursor.execute(query, params)
self._insert_annotations(cursor)
self._insert_errors(cursor)
return
def set_related(self):
ai_id = self['acquisitioninstrument']
if ai_id is None:
self.acquisition_instrument = None
elif ai_id not in self.pub.entities['AcquisitionInstrument']:
self.fields['acquisitioninstrument'].reset()
self.acquisition_instrument = None
msg = 'Undefined acquisition instrument "%s"' % ai_id
self.errors.append(errors.LinkError(msg))
else:
ai = self.pub.entities['AcquisitionInstrument'][ai_id]
self.acquisition_instrument = ai
return
def score(self):
self.points.append((3, 'Existential credit'))
# check for missing fields
if not self['type']:
self.points.append((-1, 'Missing type'))
if not self.acquisition_instrument:
self.points.append((-1, 'Missing acquisition instrument'))
return
class Data(Entity):
field_defs = (('url', URLField, 'URL'),
('doi', DOIField, 'DOI'),
('acquisition', Field, 'Acquisition'),
('subjectgroup', Field, 'Subject Group'))
table = 'data'
@classmethod
def _get_from_db(cls, pub, cursor):
d = {}
query = """SELECT *
FROM data
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
cols = [ el[0] for el in cursor.description ]
for row in cursor:
row_dict = dict(zip(cols, row))
obj = Data(pub, row_dict['id'])
obj.fields['url'].set(row_dict['url'])
obj.fields['doi'].set(row_dict['doi'])
obj.fields['acquisition'].set(row_dict['acquisition'])
obj.fields['subjectgroup'].set(row_dict['subject_group'])
d[row_dict['id']] = obj
Data._get_annotation_ids_from_db(pub, d, cursor)
Data._get_errors_from_db(pub, d, cursor)
return d
@classmethod
def _clear_pmid(cls, pmid, cursor):
query = "DELETE FROM dataXobservation WHERE publication = %s"
cursor.execute(query, (pmid, ))
super(Data, cls)._clear_pmid(pmid, cursor)
return
def __init__(self, pub, id):
super(Data, self).__init__(pub, id)
# set in Observation.set_related()
self.observations = []
return
def _insert(self, cursor):
query = """INSERT INTO data (publication,
id,
acquisition,
subject_group,
url,
doi)
VALUES (%s, %s, %s, %s, %s, %s)"""
        if self.acquisition:
            acquisition = self.acquisition.id
        else:
            acquisition = None
        if self.subject_group:
            subject_group = self.subject_group.id
        else:
            subject_group = None
        params = (self.pub.pmid,
                  self.id,
                  acquisition,
                  subject_group,
self['url'],
self['doi'])
cursor.execute(query, params)
self._insert_annotations(cursor)
self._insert_errors(cursor)
return
def set_related(self):
a_id = self['acquisition']
if a_id is None:
self.acquisition = None
elif a_id not in self.pub.entities['Acquisition']:
self.fields['acquisition'].reset()
self.acquisition = None
err = errors.LinkError('Undefined acquisition "%s"' % a_id)
self.errors.append(err)
else:
self.acquisition = self.pub.entities['Acquisition'][a_id]
sg_id = self['subjectgroup']
if sg_id is None:
self.subject_group = None
        elif sg_id not in self.pub.entities['SubjectGroup']:
            self.fields['subjectgroup'].reset()
            self.subject_group = None
            err = errors.LinkError('Undefined subjectgroup "%s"' % sg_id)
self.errors.append(err)
else:
self.subject_group = self.pub.entities['SubjectGroup'][sg_id]
return
def score(self):
self.points.append((10, 'Existential credit'))
if not self['url'] and not self['doi']:
self.points.append((-5, 'No link to data (DOI or URL)'))
if not self.subject_group:
self.points.append((-1, 'Missing subject group'))
if not self.acquisition:
self.points.append((-1, 'Missing acquisition'))
return
class AnalysisWorkflow(Entity):
field_defs = (('method', Field, 'Method'),
('methodurl', URLField, 'Method URL'),
('software', Field, 'Software'),
('softwarenitrcid', NITRCIDField, 'NITRC ID'),
('softwarerrid', RRIDField, 'Software RRID'),
('softwareurl', URLField, 'Software URL'))
table = 'analysis_workflow'
@classmethod
def _get_from_db(cls, pub, cursor):
d = {}
query = """SELECT *
FROM analysis_workflow
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
cols = [ el[0] for el in cursor.description ]
for row in cursor:
row_dict = dict(zip(cols, row))
obj = AnalysisWorkflow(pub, row_dict['id'])
obj.fields['method'].set(row_dict['method'])
obj.fields['methodurl'].set(row_dict['methodurl'])
obj.fields['software'].set(row_dict['software'])
obj.fields['softwarenitrcid'].set(row_dict['software_nitrc_id'])
obj.fields['softwarerrid'].set(row_dict['software_rrid'])
obj.fields['softwareurl'].set(row_dict['software_url'])
d[row_dict['id']] = obj
AnalysisWorkflow._get_annotation_ids_from_db(pub, d, cursor)
AnalysisWorkflow._get_errors_from_db(pub, d, cursor)
return d
def _insert(self, cursor):
query = """INSERT INTO analysis_workflow (publication,
id,
method,
methodurl,
software,
software_nitrc_id,
software_rrid,
software_url)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
params = (self.pub.pmid,
self.id,
self['method'],
self['methodurl'],
self['software'],
self['softwarenitrcid'],
self['softwarerrid'],
self['softwareurl'])
cursor.execute(query, params)
self._insert_annotations(cursor)
self._insert_errors(cursor)
return
def score(self):
self.points.append((7, 'Existential credit'))
if not self['method']:
self.points.append((-1, 'Missing method'))
if not self['methodurl']:
self.points.append((-2, 'Missing method URL'))
if not self['software']:
self.points.append((-1, 'Missing software'))
if not self['softwarenitrcid'] \
and not self['softwarerrid'] \
and not self['softwareurl']:
self.points.append((-2, 'Missing software link'))
return
class Observation(Entity):
field_defs = (('data', MultiField, 'Data'),
('analysisworkflow', Field, 'Analysis Workflow'),
('measure', Field, 'Measure'))
table = 'observation'
@classmethod
def _get_from_db(cls, pub, cursor):
d = {}
query = """SELECT *
FROM observation
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
cols = [ el[0] for el in cursor.description ]
for row in cursor:
row_dict = dict(zip(cols, row))
obj = Observation(pub, row_dict['id'])
obj.fields['analysisworkflow'].set(row_dict['analysis_workflow'])
obj.fields['measure'].set(row_dict['measure'])
d[row_dict['id']] = obj
query = """SELECT observation, data
FROM dataXobservation
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
for (observation_id, data_id) in cursor:
d[observation_id].fields['data'].set(data_id)
Observation._get_annotation_ids_from_db(pub, d, cursor)
Observation._get_errors_from_db(pub, d, cursor)
return d
@classmethod
def _clear_pmid(cls, pmid, cursor):
query = "DELETE FROM dataXobservation WHERE publication = %s"
cursor.execute(query, (pmid, ))
query = """DELETE FROM observationXmodel_application
WHERE publication = %s"""
cursor.execute(query, (pmid, ))
super(Observation, cls)._clear_pmid(pmid, cursor)
return
def __init__(self, pub, id):
super(Observation, self).__init__(pub, id)
# set in ModelApplication.set_related()
self.model_applications = []
return
def _insert(self, cursor):
query = """INSERT INTO observation (publication,
id,
analysis_workflow,
measure)
VALUES (%s, %s, %s, %s)"""
if self.analysis_workflow:
aw = self.analysis_workflow.id
else:
aw = None
params = (self.pub.pmid,
self.id,
aw,
self['measure'])
cursor.execute(query, params)
query = """INSERT INTO dataXobservation (publication,
data,
observation)
VALUES (%s, %s, %s)"""
for data in self.data:
params = (self.pub.pmid, data.id, self.id)
cursor.execute(query, params)
self._insert_annotations(cursor)
self._insert_errors(cursor)
return
def set_related(self):
aw_id = self['analysisworkflow']
if aw_id is None:
self.analysis_workflow = None
elif aw_id not in self.pub.entities['AnalysisWorkflow']:
self.fields['analysisworkflow'].reset()
self.analysis_workflow = None
            err = errors.LinkError('Undefined analysis workflow "%s"' % aw_id)
self.errors.append(err)
else:
aw = self.pub.entities['AnalysisWorkflow'][aw_id]
self.analysis_workflow = aw
self.data = []
if self['data']:
d_ids = list(self['data'])
self.fields['data'].reset()
for d_id in d_ids:
if d_id not in self.pub.entities['Data']:
err = errors.LinkError('Undefined data "%s"' % d_id)
self.errors.append(err)
else:
self.fields['data'].set(d_id)
d = self.pub.entities['Data'][d_id]
self.data.append(d)
d.observations.append(self)
return
def score(self):
self.points.append((10, 'Existential credit'))
if not self['measure']:
self.points.append((-5, 'Missing measure'))
if not self.data:
self.points.append((-2, 'Missing data'))
if not self.analysis_workflow:
self.points.append((-2, 'Missing analysis workflow'))
return
class Model(Entity):
field_defs = (('type', Field, 'Type'),
('variable', MultiField, 'Variables'))
table = 'model'
@classmethod
def _get_from_db(cls, pub, cursor):
d = {}
query = """SELECT *
FROM model
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
cols = [ el[0] for el in cursor.description ]
for row in cursor:
row_dict = dict(zip(cols, row))
obj = Model(pub, row_dict['id'])
obj.fields['type'].set(row_dict['type'])
d[row_dict['id']] = obj
query = """SELECT model, variable
FROM model_variable
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
for (model_id, variable) in cursor:
d[model_id].fields['variable'].set(variable)
Model._get_annotation_ids_from_db(pub, d, cursor)
Model._get_errors_from_db(pub, d, cursor)
return d
@classmethod
def _clear_pmid(cls, pmid, cursor):
query = "DELETE FROM model_variable WHERE publication = %s"
cursor.execute(query, (pmid, ))
super(Model, cls)._clear_pmid(pmid, cursor)
return
def _insert(self, cursor):
query = "INSERT INTO model (publication, id, type) VALUES (%s, %s, %s)"
params = (self.pub.pmid, self.id, self['type'])
cursor.execute(query, params)
if self['variable'] is not None:
query = """INSERT INTO model_variable (publication,
model,
variable)
VALUES (%s, %s, %s)"""
for val in self['variable']:
params = (self.pub.pmid, self.id, val)
cursor.execute(query, params)
self._insert_annotations(cursor)
self._insert_errors(cursor)
return
def score(self):
self.points.append((10, 'Existential credit'))
# check if any variables are defined
# check for bad interaction variables
if not self['type']:
self.points.append((-4, 'No model type defined'))
if not self['variable']:
self.points.append((-4, 'No variables defined'))
else:
simple_vars = []
int_vars = []
bad_components = set()
for var in self['variable']:
if '+' in var:
int_vars.append(var)
else:
simple_vars.append(var)
for int_var in int_vars:
for int_component in int_var.split('+'):
if int_component not in simple_vars:
bad_components.add(int_component)
if bad_components:
vars = ', '.join(sorted(bad_components))
msg = 'Variables only in interaction terms: %s' % vars
self.points.append((-2, msg))
return
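    # Worked example (editor's addition): for variables
    # ['age', 'sex', 'age+iq'], the interaction term 'age+iq' splits into
    # 'age' and 'iq'; since 'iq' has no stand-alone entry, it is reported
    # as a variable appearing only in an interaction term (-2 points).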
class ModelApplication(Entity):
field_defs = (('observation', MultiField, 'Observations'),
('model', Field, 'Model'),
('url', URLField, 'URL'),
('software', Field, 'Software'))
table = 'model_application'
@classmethod
def _get_from_db(cls, pub, cursor):
d = {}
query = """SELECT *
FROM model_application
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
cols = [ el[0] for el in cursor.description ]
for row in cursor:
row_dict = dict(zip(cols, row))
obj = ModelApplication(pub, row_dict['id'])
obj.fields['model'].set(row_dict['model'])
obj.fields['url'].set(row_dict['url'])
obj.fields['software'].set(row_dict['software'])
d[row_dict['id']] = obj
query = """SELECT observation, model_application
FROM observationXmodel_application
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
for (observation_id, model_application_id) in cursor:
d[model_application_id].fields['observation'].set(observation_id)
ModelApplication._get_annotation_ids_from_db(pub, d, cursor)
ModelApplication._get_errors_from_db(pub, d, cursor)
return d
@classmethod
def _clear_pmid(cls, pmid, cursor):
query = """DELETE FROM observationXmodel_application
WHERE publication = %s"""
cursor.execute(query, (pmid, ))
super(ModelApplication, cls)._clear_pmid(pmid, cursor)
return
def _insert(self, cursor):
query = """INSERT INTO model_application (publication,
id,
model,
url,
software)
VALUES (%s, %s, %s, %s, %s)"""
if self.model:
model = self.model.id
else:
model = None
params = (self.pub.pmid,
self.id,
model,
self['url'],
self['software'])
cursor.execute(query, params)
query = """INSERT INTO observationXmodel_application
(publication, observation, model_application)
VALUES (%s, %s, %s)"""
        for obs in self.observations:
params = (self.pub.pmid, obs.id, self.id)
cursor.execute(query, params)
self._insert_annotations(cursor)
self._insert_errors(cursor)
return
def set_related(self):
m_id = self['model']
if m_id is None:
self.model = None
elif m_id not in self.pub.entities['Model']:
self.fields['model'].reset()
self.model = None
self.errors.append(errors.LinkError('Undefined model "%s"' % m_id))
else:
self.model = self.pub.entities['Model'][m_id]
self.observations = []
if self['observation']:
# make a copy
o_ids = list(self['observation'])
self.fields['observation'].reset()
for o_id in o_ids:
if o_id not in self.pub.entities['Observation']:
err = errors.LinkError('Undefined observation "%s"' % o_id)
self.errors.append(err)
else:
self.fields['observation'].set(o_id)
obs = self.pub.entities['Observation'][o_id]
self.observations.append(obs)
obs.model_applications.append(self)
return
def score(self):
self.points.append((11, 'Existential credit'))
if not self['url']:
self.points.append((-5, 'No link to analysis'))
if not self['software']:
self.points.append((-1, 'Missing software'))
if not self.model:
self.points.append((-2, 'Missing model'))
if not self['observation']:
self.points.append((-2, 'Missing observation(s)'))
return
class Result(Entity):
field_defs = (('modelapplication', Field, 'Model Application'),
('value', Field, 'Value'),
('variable', MultiField, 'Variables'),
('f', Field, 'f'),
('p', Field, 'p'),
('interpretation', Field, 'Interpretation'))
table = 'result'
@classmethod
def _get_from_db(cls, pub, cursor):
d = {}
query = """SELECT *
FROM result
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
cols = [ el[0] for el in cursor.description ]
for row in cursor:
row_dict = dict(zip(cols, row))
obj = Result(pub, row_dict['id'])
obj.fields['modelapplication'].set(row_dict['model_application'])
obj.fields['value'].set(row_dict['value'])
obj.fields['f'].set(row_dict['f'])
obj.fields['p'].set(row_dict['p'])
obj.fields['interpretation'].set(row_dict['interpretation'])
d[row_dict['id']] = obj
query = """SELECT result, variable
FROM result_variable
WHERE publication = %s"""
cursor.execute(query, (pub.pmid, ))
for (result_id, variable) in cursor:
d[result_id].fields['variable'].set(variable)
Result._get_annotation_ids_from_db(pub, d, cursor)
Result._get_errors_from_db(pub, d, cursor)
return d
@classmethod
def _clear_pmid(cls, pmid, cursor):
query = "DELETE FROM result_variable WHERE publication = %s"
cursor.execute(query, (pmid, ))
super(Result, cls)._clear_pmid(pmid, cursor)
return
def _insert(self, cursor):
query = """INSERT INTO result (publication,
id,
model_application,
value,
f,
p,
interpretation)
VALUES (%s, %s, %s, %s, %s, %s, %s)"""
if self.model_application:
ma = self.model_application.id
else:
ma = None
params = (self.pub.pmid,
self.id,
ma,
self['value'],
self['f'],
self['p'],
self['interpretation'])
cursor.execute(query, params)
if self['variable'] is not None:
query = """INSERT INTO result_variable (publication,
result,
variable)
VALUES (%s, %s, %s)"""
for val in self['variable']:
params = (self.pub.pmid, self.id, val)
cursor.execute(query, params)
self._insert_annotations(cursor)
self._insert_errors(cursor)
return
def set_related(self):
ma_id = self['modelapplication']
if ma_id is None:
self.model_application = None
elif ma_id not in self.pub.entities['ModelApplication']:
self.fields['modelapplication'].reset()
self.model_application = None
err = errors.LinkError('Undefined model application "%s"' % ma_id)
self.errors.append(err)
else:
ma = self.pub.entities['ModelApplication'][ma_id]
self.model_application = ma
return
def score(self):
self.points.append((23, 'Existential credit'))
if not self['value']:
self.points.append((-3, 'Missing "Value"'))
if not self['f']:
self.points.append((-2, 'Missing F'))
if not self['p']:
self.points.append((-5, 'Missing P'))
if not self['interpretation']:
self.points.append((-2, 'Missing interpretation'))
model_vars = []
if not self.model_application:
self.points.append((-5, 'Missing model application'))
elif self.model_application.model:
model_vars = self.model_application.model['variable']
if not self['variable']:
self.points.append((-5, 'Missing variable(s)'))
else:
bad_vars = set()
for var in self['variable']:
if var not in model_vars:
bad_vars.add(var)
if bad_vars:
fmt = 'Variables not defined in the model: %s'
msg = fmt % ', '.join(sorted(bad_vars))
self.points.append((-2, msg))
return
# entities[markup entity type] = entity class
# this order propagates to Publication.entities, whose order is used to put
# the entities in the database
entities = OrderedDict()
entities['SubjectGroup'] = SubjectGroup
entities['AcquisitionInstrument'] = AcquisitionInstrument
entities['Acquisition'] = Acquisition
entities['Data'] = Data
entities['AnalysisWorkflow'] = AnalysisWorkflow
entities['Observation'] = Observation
entities['Model'] = Model
entities['ModelApplication'] = ModelApplication
entities['Result'] = Result
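# Illustrative sketch (editor's addition): consumers can walk the registry in
# insertion order, which is also the database insert order, e.g.:
#
#     for markup_type, cls in entities.iteritems():
#         print markup_type, cls.table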
# eof
| [
"[email protected]"
] | |
448bcd261807fa05a45781f836527a3b1d6f27f8 | 8d2e5b5ea408579faa699c09bdbea39e864cdee1 | /ufora/util/ThreadSafeDict.py | 75509f50e6a673f9c5cf633da81f7f424242c589 | [
"dtoa",
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | iantuioti/ufora | 2218ef4c7e33c171268ce11458e9335be7421943 | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | refs/heads/master | 2021-01-17T17:08:39.228987 | 2017-01-30T16:00:45 | 2017-01-30T16:00:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import threading
class ThreadSafeDict:
def __init__(self, vals = dict()):
self.vals = dict(vals)
self.lock = threading.RLock()
def __str__(self):
return self.vals.__str__()
def __getitem__(self, index):
with self.lock:
return self.vals[index]
def __setitem__(self, index, val):
with self.lock:
self.vals[index] = val
def updateItem(self, index, f, noneisdel = True):
with self.lock:
self.vals[index] = f(self.vals[index] if self.vals.has_key(index) else None)
if noneisdel and self.vals[index] is None:
del self.vals[index]
def update(self, foreignDict):
with self.lock:
self.vals.update(foreignDict)
def has_key(self, index):
with self.lock:
return self.vals.has_key(index)
def keys(self):
with self.lock:
return list(self.vals.keys())
def __delitem__(self, key):
with self.lock:
del self.vals[key]
def __len__(self):
with self.lock:
return len(self.vals)
def scan(self,f):
with self.lock:
for k in self.vals:
f(k, self.vals[k])
def dictCopy(self):
with self.lock:
return dict(self.vals)
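if __name__ == '__main__':
    # Minimal usage sketch (editor's addition, not in the original file).
    d = ThreadSafeDict({'hits': 0})
    d['misses'] = 0
    # updateItem applies f under the lock; returning None deletes the key
    # when noneisdel is True.
    d.updateItem('hits', lambda v: (v or 0) + 1)
    print d.dictCopy()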
| [
"[email protected]"
] | |
e4cb407440b02134b7fb10f738ba19ed0698aa2f | 786de89be635eb21295070a6a3452f3a7fe6712c | /pyimgalgos/tags/V00-00-13/src/image_crop.py | 3c24a7427ffe5f080f718d92b120ac8d33ba992e | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,994 | py | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# Pyana/psana user analysis module image_crop...
#
#------------------------------------------------------------------------
"""User analysis module for pyana and psana frameworks.
This software was developed for the LCLS project. If you use all or
part of it, please give an appropriate acknowledgment.
@version $Id: template!pyana-module!py 2987 2012-02-25 03:28:58Z [email protected] $
@author Mikhail S. Dubrovin
"""
#------------------------------
# Module's version from SVN --
#------------------------------
__version__ = "$Revision: 2987 $"
# $Source$
#--------------------------------
# Imports of standard modules --
#--------------------------------
import sys
import logging
#-----------------------------
# Imports for other modules --
#-----------------------------
#from psana import *
import numpy as np
class image_crop (object) :
"""Get image from the evt store crop it and put it back in the evt store"""
def __init__ ( self ) :
"""Class constructor.
Parameters are passed from pyana.cfg configuration file.
All parameters are passed as strings
@param source string, address of Detector.Id:Device.ID
@param key_in string, keyword for input image 2-d array
@param key_out string, unique keyword for output image array
        @param rowmin int, row minimal to crop image (default = 0 - for full size)
        @param rowmax int, row maximal to crop image (default = -1 - for full size)
        @param colmin int, column minimal to crop image (default = 0 - for full size)
        @param colmax int, column maximal to crop image (default = -1 - for full size)
@param print_bits int, bit-word for verbosity control
"""
self.m_src = self.configSrc ('source', '*-*|Cspad-*')
self.m_key_in = self.configStr ('key_in', 'image_in')
self.m_key_out = self.configStr ('key_out', 'image_out')
self.rowmin = self.configInt ('rowmin', 0)
self.rowmax = self.configInt ('rowmax', -1)
self.colmin = self.configInt ('colmin', 0)
self.colmax = self.configInt ('colmax', -1)
self.m_print_bits = self.configInt ('print_bits', 1)
self.counter = 0
if self.m_print_bits & 1 : self.print_input_pars()
def beginjob( self, evt, env ) : pass
def beginrun( self, evt, env ) :
self.run = evt.run()
self.exp = env.experiment()
self.evnum = 0
def begincalibcycle( self, evt, env ) : pass
def event( self, evt, env ) :
"""This method is called for every L1Accept transition.
@param evt event data object
@param env environment object
"""
        # Should work for both pyana and pythonic-psana (as a compatibility method):
#print '\nimage_crop: evt.keys():', evt.keys()
if env.fwkName() == "psana":
#self.arr = evt.get(np.ndarray, self.m_key_in)
self.arr = evt.get(np.ndarray, self.m_src, self.m_key_in)
else :
self.arr = evt.get(self.m_key_in)
self.counter +=1
if self.arr is None :
#if self.m_print_bits & 32 :
msg = __name__ + ': WARNING! CSPAD array object %s is not found in evt' % self.m_key_in
#logging.info( msg )
print msg
return
if self.m_print_bits & 2 and self.counter == 1 :
self.print_image_parameters()
self.img2d = np.array(self.arr[self.rowmin:self.rowmax, self.colmin:self.colmax])
#self.img2d = self.arr
#evt.put( self.img2d, self.m_key_out ) # save image in event as 2d numpy array
evt.put( self.img2d, self.m_src, self.m_key_out ) # save image in event as 2d numpy array
def endcalibcycle( self, evt, env ) : pass
def endrun ( self, evt, env ) : pass
def endjob ( self, evt, env ) : pass
#-----------------------------
def print_input_pars( self ) :
msg = '\n%s: List of input parameters\n source %s\n key_in %s\n key_out %s\n print_bits: %4d' % \
(__name__ , self.m_src, self.m_key_in, self.m_key_out, self.m_print_bits) + \
'\n rowmin %s\n rowmax %s\n colmin %s\n colmax %s\n' % \
(self.rowmin, self.rowmax, self.colmin, self.colmax)
#logging.info( msg )
print msg
def print_image_parameters( self ) :
msg = '%s: Input image parameters for run = %s:\n' % (__name__, self.run) \
+ ' shape = %s' % str(self.arr.shape) \
+ ' dtype = %s' % str(self.arr.dtype)
# + '\narray:\n' + str(self.arr)
#logging.info( msg )
print msg
#-----------------------------
#-----------------------------
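#-----------------------------
# Illustrative configuration sketch (editor's addition, not from the
# original source). In a pyana/psana configuration file this module might
# be set up as follows; the detector address and crop window are assumed
# values for the example only:
#
#   [image_crop]
#   source  = CxiDs1.0:Cspad.0
#   key_in  = image_in
#   key_out = image_out
#   rowmin  = 100
#   rowmax  = 500
#   colmin  = 100
#   colmax  = 500
#-----------------------------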
| [
"[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7"
] | [email protected]@b967ad99-d558-0410-b138-e0f6c56caec7 |
7fef47453d50a19daade3ccb1cac7c5cf2db9935 | 3f6c16ea158a8fb4318b8f069156f1c8d5cff576 | /.PyCharm2019.1/system/python_stubs/-1317042838/_codecs.py | 19d1dd99bb98e64143e5afef560b11128f7229a6 | [] | no_license | sarthak-patidar/dotfiles | 08494170d2c0fedc0bbe719cc7c60263ce6fd095 | b62cd46f3491fd3f50c704f0255730af682d1f80 | refs/heads/master | 2020-06-28T23:42:17.236273 | 2019-10-01T13:56:27 | 2019-10-01T13:56:27 | 200,369,900 | 0 | 0 | null | 2019-08-03T12:56:33 | 2019-08-03T11:53:29 | Shell | UTF-8 | Python | false | false | 8,128 | py | # encoding: utf-8
# module _codecs
# from (built-in)
# by generator 1.147
# no doc
# no imports
# functions
def ascii_decode(*args, **kwargs): # real signature unknown
pass
def ascii_encode(*args, **kwargs): # real signature unknown
pass
def charmap_build(*args, **kwargs): # real signature unknown
pass
def charmap_decode(*args, **kwargs): # real signature unknown
pass
def charmap_encode(*args, **kwargs): # real signature unknown
pass
def decode(*args, **kwargs): # real signature unknown
"""
Decodes obj using the codec registered for encoding.
Default encoding is 'utf-8'. errors may be given to set a
different error handling scheme. Default is 'strict' meaning that encoding
errors raise a ValueError. Other possible values are 'ignore', 'replace'
and 'backslashreplace' as well as any other name registered with
codecs.register_error that can handle ValueErrors.
"""
pass
def encode(*args, **kwargs): # real signature unknown
"""
Encodes obj using the codec registered for encoding.
The default encoding is 'utf-8'. errors may be given to set a
different error handling scheme. Default is 'strict' meaning that encoding
errors raise a ValueError. Other possible values are 'ignore', 'replace'
and 'backslashreplace' as well as any other name registered with
codecs.register_error that can handle ValueErrors.
"""
pass
def escape_decode(*args, **kwargs): # real signature unknown
pass
def escape_encode(*args, **kwargs): # real signature unknown
pass
def latin_1_decode(*args, **kwargs): # real signature unknown
pass
def latin_1_encode(*args, **kwargs): # real signature unknown
pass
def lookup(*args, **kwargs): # real signature unknown
""" Looks up a codec tuple in the Python codec registry and returns a CodecInfo object. """
pass
def lookup_error(errors): # real signature unknown; restored from __doc__
"""
lookup_error(errors) -> handler
Return the error handler for the specified error handling name or raise a
LookupError, if no handler exists under this name.
"""
pass
def raw_unicode_escape_decode(*args, **kwargs): # real signature unknown
pass
def raw_unicode_escape_encode(*args, **kwargs): # real signature unknown
pass
def readbuffer_encode(*args, **kwargs): # real signature unknown
pass
def register(*args, **kwargs): # real signature unknown
"""
Register a codec search function.
Search functions are expected to take one argument, the encoding name in
all lower case letters, and either return None, or a tuple of functions
(encoder, decoder, stream_reader, stream_writer) (or a CodecInfo object).
"""
pass
def register_error(*args, **kwargs): # real signature unknown
"""
Register the specified error handler under the name errors.
handler must be a callable object, that will be called with an exception
instance containing information about the location of the encoding/decoding
error and must return a (replacement, new position) tuple.
"""
pass
def unicode_escape_decode(*args, **kwargs): # real signature unknown
pass
def unicode_escape_encode(*args, **kwargs): # real signature unknown
pass
def unicode_internal_decode(*args, **kwargs): # real signature unknown
pass
def unicode_internal_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_be_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_be_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_ex_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_le_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_le_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_be_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_be_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_ex_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_le_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_le_encode(*args, **kwargs): # real signature unknown
pass
def utf_7_decode(*args, **kwargs): # real signature unknown
pass
def utf_7_encode(*args, **kwargs): # real signature unknown
pass
def utf_8_decode(*args, **kwargs): # real signature unknown
pass
def utf_8_encode(*args, **kwargs): # real signature unknown
pass
def _forget_codec(*args, **kwargs): # real signature unknown
""" Purge the named codec from the internal codec lookup cache """
pass
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7f3ceeae5048>, 'find_spec': <classmethod object at 0x7f3ceeae5080>, 'find_module': <classmethod object at 0x7f3ceeae50b8>, 'create_module': <classmethod object at 0x7f3ceeae50f0>, 'exec_module': <classmethod object at 0x7f3ceeae5128>, 'get_code': <classmethod object at 0x7f3ceeae5198>, 'get_source': <classmethod object at 0x7f3ceeae5208>, 'is_package': <classmethod object at 0x7f3ceeae5278>, 'load_module': <classmethod object at 0x7f3ceeae52b0>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
__spec__ = None # (!) real value is "ModuleSpec(name='_codecs', loader=<class '_frozen_importlib.BuiltinImporter'>, origin='built-in')"
| [
"[email protected]"
] | |
419ece67d00ba37f7460c57ca43884e4ca9e0b2f | dafd25bce1c1fe6c667119e7a541b3cdb44b6181 | /youtube_tranding/auto.py | 454beae653fc6832b41abb2c345f1805d9012b0e | [] | no_license | armannurhidayat/python_scraper | 23fd84dfb7263b7e23a96bb72ee8b7ce68cb52df | 5b0b56247e753f190a9dfdd0bbc150f84d1609f9 | refs/heads/master | 2023-02-05T00:00:48.380383 | 2020-12-27T02:28:29 | 2020-12-27T02:28:29 | 250,330,177 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | import mysql.connector
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
config = {
'user' : 'root',
'password': 'root',
'host' : 'localhost',
'port' : 8889,
'database': 'yt_trending',
'raise_on_warnings': True
}
url = 'https://www.youtube.com/feed/trending?gl=ID&hl=id'
path_driver = "/Users/armannurhidayat/Desktop/selenium/driver/chromedriver"
chrome_options = Options()
chrome_options.add_argument("--incognito")
driver = webdriver.Chrome(executable_path=path_driver, options=chrome_options)
driver.get(url)
video_title = driver.find_elements_by_xpath('//*[@id="video-title"]')
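# Editor's note: find_elements_by_xpath is the Selenium 3 API; on Selenium 4
# the equivalent call is driver.find_elements(By.XPATH, '//*[@id="video-title"]').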
for rec in video_title:
title = rec.text
link = rec.get_attribute('href')
# Insert SQL
print('{} - {}'.format(title, link))
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor(dictionary=True)
cursor.execute("INSERT INTO `trand` (`title`,`url`) VALUES ('{}', '{}')".format(title, link))
cnx.commit()
time.sleep(2) | [
"[email protected]"
] | |
de900b93a39989817a16977f10cddadf7b234e32 | 06292f96cba132ca57777672a447cfff7c5abee6 | /Asm1/ThamKhao/testPhuc/ParserSuite.py | 940fca2dcbd72c810ca6309115c038b9b10115ef | [] | no_license | kietteik/ppl | 1746440b12affe71e67d6f958922b32b1fdaab5c | 2ee60582e81595b8d8b5d0f8212d20151cfe9264 | refs/heads/master | 2023-03-01T00:24:36.969189 | 2021-01-31T05:15:13 | 2021-01-31T05:15:13 | 305,802,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,965 | py | import unittest
from TestUtils import TestParser
class ParserSuite(unittest.TestCase):
def test_var_dec_1(self):
input = """Var: a = 5;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,201))
def test_var_dec_2(self):
input = """Var: ;"""
expect = "Error on line 1 col 5: ;"
self.assertTrue(TestParser.checkParser(input,expect,202))
def test_var_dec_3(self):
input = """Var: b[2][3] = {{2,3,4},{4,5,6}};"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,203))
def test_var_dec_4(self):
input = """Var: c, d = 6, e, f;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,204))
def test_var_dec_5(self):
input = """Var: m, n[10];"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,205))
def test_var_dec_6(self):
input = """Var: n[10], m;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,206))
def test_assignment_stm_1(self):
input = """a[3 + foo(2)] = a[b [2][3]] + 4;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,207))
def test_assignment_stm_2(self):
input = """v = (4. \. 3.) *. 3.14 *. r *. r *. r;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,208))
def test_callee_stm_1(self):
input = """foo(a[1][2] + 2, x + 1);"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,209))
def test_callee_stm_2(self):
input = """x = foo(a[1][2] + 2, x + 1);"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,210))
def test_function_1(self):
input = """Function: foo
Parameter: a[5], b
Body:
Var: i = 0;
While (i < 5) Do
a[i] = b +. 1.0;
i = i + 1;
EndWhile.
EndBody."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,211))
def test_for_1(self):
input = """For (i = 0, i < 10, 2) Do
writeln(i);
EndFor."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,212))
def test_var_dec_7(self):
input = """Var: a = 5;
Var: b[2][3] = {{2,3,4},{4,5,6}};
Var: c, d = 6, e, f;
Var: m, n[10];"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,213))
def test_var_dec_func_dec_1(self):
input = """Var: x;
Function: fact
Parameter: n
Body:
If n == 0 Then
Return 1;
Else
Return n * fact (n - 1);
EndIf.
EndBody.
Function: main
Body:
x = 10;
fact (x);
EndBody."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,214))
def test_var_dec_8(self):
input = """Var: a[5] = {1,4,3,2,0};
Var: b[2][3]={{1,2,3},{4,5,6}};"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,215))
def test_var_dec_func_dec_2(self):
input = """Var: x;
Function: fact
Parameter: n
Body:
If n == 0 Then
Return 1;
Else
Return n * fact (n - 1);
EndIf.
EndBody.
Function: main
Body:
x = 10;
fact (x);
EndBody."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,216))
def test_callee_stm_3(self):
input = """foo (2 + x, 4. \. y);
goo ();"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,217))
def test_type_coercions_1(self):
input = """If bool_of_string ("True") Then
a = int_of_string (read ());
b = float_of_int (a) +. 2.0;
EndIf."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,218))
def test_var_dec_func_dec_3(self):
input = """Function: test
Parameter: n
Body:
If n > 10 Then
Return 5;
Else
Return False;
EndIf.
EndBody."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,219))
def test_var_dec_9(self):
input = """a = 1;
b[2][3] = 5;
c[2] = {{1,3},{1,5,7}};"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,220))
def test_var_dec_func_dec_4(self):
input = """Function: test
Parameter: n
Body:
If n > 10 Then
Return 5;
Else
Return a[4][5 + b[2][3]];
EndIf.
EndBody."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,221))
def test_var_dec_10(self):
input = """Var: a = "Xin chao moi nguoi!";
Var: b = 5, c = False;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,222))
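
# To run this suite (sketch, assuming the course's TestUtils/TestParser modules
# are on the path): python -m unittest ParserSuite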
| [
"[email protected]"
] | |
7c7c20cdfdc1ad53c73c20ab931e5efcff7528aa | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/15174011.py | d436adf8e9635a383cb4126186976d45e43be6ef | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/15174011.py generated: Fri, 27 Mar 2015 16:10:06
#
# Event Type: 15174011
#
# ASCII decay Descriptor: [Lambda_b0 -> D+(-> K- pi+ pi+) mu-]cc
#
from Configurables import Generation
Generation().EventType = 15174011
Generation().SampleGenerationTool = "SignalPlain"
from Configurables import SignalPlain
Generation().addTool( SignalPlain )
Generation().SignalPlain.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Lb_D+mu,Kpipi=phsp,DecProdCut.dec"
Generation().SignalPlain.CutTool = "DaughtersInLHCb"
Generation().SignalPlain.SignalPIDList = [ 5122,-5122 ]
| [
"[email protected]"
] | |
6dae49d404619d13917ca5e5dd08fa811d287aad | 37db56765276c0835a2c7e3955c412ce204836c1 | /241.py | 19a44d3fb46bd049d5ae60b83250f8d7f974f167 | [] | no_license | supperllx/LeetCode | 9d0a3a7258d1cff6afa6e77f61a2e697834914ca | df3a589ea858218f689fe315d134adc957c3debd | refs/heads/master | 2023-05-01T06:57:17.403568 | 2021-05-19T18:29:25 | 2021-05-19T18:34:03 | 288,351,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | import functools
import operator
from typing import List


class Solution:
@functools.cache
def diffWaysToCompute(self, input: str) -> List[int]:
if input.isnumeric():
return [int(input)]
else:
ops = {'+': operator.add, '-': operator.sub, '*': operator.mul}
res = []
indexes = []
for i, ch in enumerate(input):
if ch in ops:
indexes.append(i)
for i in indexes:
for left in self.diffWaysToCompute(input[:i]):
for right in self.diffWaysToCompute(input[i+1:]):
res.append(ops[input[i]](left, right))
return list(res) | [
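
# Illustrative check (not part of the original solution): "2-1-1" has two
# parenthesizations, (2-(1-1)) = 2 and ((2-1)-1) = 0, so
# sorted(Solution().diffWaysToCompute("2-1-1")) == [0, 2]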
"[email protected]"
] | |
ab05aff12541f245de8fe7bd4437943a46a928ec | 77166c6ed9b872fa69b454d3682f63527f5f3951 | /tests/unit2/test_load_textures.py | 9f74bd64ebe79c8225596d0f8f2a84e1267ace5a | [
"MIT"
] | permissive | biggzlar/arcade | d72d936f3c244a9d5173b6f36bca3ede3382a0ae | fc444db356452660ac6cb2ffe241f0b1a3d4bcf3 | refs/heads/master | 2020-12-14T06:30:18.997456 | 2020-01-18T04:44:03 | 2020-01-18T04:44:03 | 234,668,560 | 1 | 0 | NOASSERTION | 2020-01-18T02:07:41 | 2020-01-18T02:07:40 | null | UTF-8 | Python | false | false | 4,737 | py | import os
import arcade
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
LINE_HEIGHT = 20
CHARACTER_SCALING = 0.5
COIN_SCALE = 0.25
class MyTestWindow(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
self.frame_count = 0
arcade.set_background_color(arcade.color.AMAZON)
self.character_list = arcade.SpriteList()
self.player = arcade.AnimatedWalkingSprite()
self.player.stand_right_textures = [arcade.load_texture(":resources:images/animated_characters/robot/robot_idle.png")]
self.player.stand_left_textures = [arcade.load_texture(":resources:images/animated_characters/robot/robot_idle.png", mirrored=True)]
self.player.walk_right_textures = [arcade.load_texture(":resources:images/animated_characters/robot/robot_walk0.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk1.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk2.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk3.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk4.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk5.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk6.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk7.png")]
self.player.walk_left_textures = [arcade.load_texture(":resources:images/animated_characters/robot/robot_walk0.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk1.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk2.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk3.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk4.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk5.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk6.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk7.png", mirrored=True)]
self.player.texture_change_distance = 20
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
self.player.scale = 0.8
self.player.change_x = 2
self.player.texture = self.player.stand_left_textures[0]
self.character_list.append(self.player)
self.coin_list = arcade.SpriteList()
coin = arcade.AnimatedTimeSprite(scale=0.5)
coin.center_x = 500
coin.center_y = 500
coin.textures = []
coin.textures.append(arcade.load_texture(":resources:images/items/gold_1.png", scale=COIN_SCALE))
coin.textures.append(arcade.load_texture(":resources:images/items/gold_2.png", scale=COIN_SCALE))
coin.textures.append(arcade.load_texture(":resources:images/items/gold_3.png", scale=COIN_SCALE))
coin.textures.append(arcade.load_texture(":resources:images/items/gold_4.png", scale=COIN_SCALE))
coin.textures.append(arcade.load_texture(":resources:images/items/gold_3.png", scale=COIN_SCALE))
coin.textures.append(arcade.load_texture(":resources:images/items/gold_2.png", scale=COIN_SCALE))
coin.set_texture(0)
self.coin_list.append(coin)
def on_draw(self):
arcade.start_render()
self.coin_list.draw()
self.character_list.draw()
def update(self, delta_time):
self.frame_count += 1
if self.frame_count == 70:
self.player.change_x *= -1
self.coin_list.update()
self.coin_list.update_animation(delta_time)
self.character_list.update()
self.character_list.update_animation(delta_time)
def test_sprite():
window = MyTestWindow(SCREEN_WIDTH, SCREEN_HEIGHT, "Test Animation")
window.test(90)
window.close()
| [
"[email protected]"
] | |
ca40a4e3bed0d8bd97234170a3f8ec82dfa77d02 | 4b44a299bafbd4ca408ce1c89c9fe4a449632783 | /python3/14_Code_Quality/04_mocking/example_2/test_calculator.py | 773fb6d377820f066c3352f19151f1de0c8dade3 | [] | no_license | umunusb1/PythonMaterial | ecd33d32b2de664eaaae5192be7c3f6d6bef1d67 | 1e0785c55ccb8f5b9df1978e1773365a29479ce0 | refs/heads/master | 2023-01-23T23:39:35.797800 | 2020-12-02T19:29:00 | 2020-12-02T19:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from unittest import TestCase
from main import Calculator
class TestCalculator(TestCase):
def setUp(self):
self.calc = Calculator()
def test_sum(self):
answer = self.calc.sum(2, 4)
self.assertEqual(answer, 6)
| [
"[email protected]"
] | |
1965d29d375c6499f282d0e556160f23324bf4c5 | bec623f2fab5bafc95eb5bd95e7527e06f6eeafe | /django-gc-shared/visitor_analytics/migrations/0001_initial.py | 8f99c744d9c12c4a97d23a04880458deedb220fe | [] | no_license | riyanhax/a-demo | d714735a8b59eceeb9cd59f788a008bfb4861790 | 302324dccc135f55d92fb705c58314c55fed22aa | refs/heads/master | 2022-01-21T07:24:56.468973 | 2017-10-12T13:48:55 | 2017-10-12T13:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UtmAnalytics',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('utm_source', models.CharField(default=b'', max_length=256)),
('utm_medium', models.CharField(default=b'', max_length=256)),
('utm_campaign', models.CharField(default=b'', max_length=256)),
('utm_timestamp', models.DateTimeField(null=True)),
('referrer', models.CharField(default=b'', max_length=2048)),
('referrer_timestamp', models.DateTimeField(null=True)),
('agent_code_timestamp', models.DateTimeField(null=True)),
('registration_page', models.CharField(default=b'', max_length=4096)),
('user', models.OneToOneField(related_name='utm_analytics', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"ibalyko@ubuntu-server-16-04"
] | ibalyko@ubuntu-server-16-04 |
c51fe65b0eea696958298037853329c6a2320d97 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/62/usersdata/172/29550/submittedfiles/ex1.py | c6a1b4c62ab0c54ae5cb58541624113d1e9cb84a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | # -*- coding: utf-8 -*-
from __future__ import division
a = float(input('Enter a: '))
b = float(input('Enter b: '))
c = float(input('Enter c: '))
# START FROM HERE!
d = b*b - 4*a*c  # original had 4(a*c), which raises "TypeError: 'int' object is not callable"
print(d)
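# Example: a=1, b=5, c=6 gives the discriminant d = 25.0 - 24.0 = 1.0.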
"[email protected]"
] | |
7d24a6875659d927451451cfe5c01e242aee20e7 | 417ab6024a95e97b4d2236c67e28d00e6d1defc0 | /python/fetch/mayi01/img.py | 589afd6265b67e6b4c17fef51b293b349378b05d | [] | no_license | zeus911/myconf | 11139069948f7c46f760ca0a8f1bd84df5ec4275 | 6dc7a6761ab820d6e97a33a55a8963f7835dbf34 | refs/heads/master | 2020-04-18T02:16:09.560219 | 2019-01-22T18:15:08 | 2019-01-22T18:15:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,826 | py | #!/usr/bin python
# -*- coding: utf-8 -*-
import datetime
import threading
from common import common
from baseparse import *
from common import db_ops
from common.envmod import *
from common import dateutil
from fetch.profile import *
global baseurl
class ImgParse(BaseParse):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
objs = self.parseChannel()
dbVPN = db.DbVPN()
ops = db_ops.DbOps(dbVPN)
for obj in objs:
ops.inertImgChannel(obj)
dbVPN.commit()
for obj in objs:
for i in range(1, maxImgPage):
url = obj['url']
if i!=1:
url= "%s%s%s"%(url.replace("0.html",""),i,".html")
print url
count = self.update(url, ops, obj['url'], i)
dbVPN.commit()
if count == 0:
break
def parseChannel(self):
ahrefs = self.header("header3.html")
objs = []
for ahref in ahrefs:
obj = {}
            obj['name'] = ahref.text
            obj['url'] = ahref.get("href")
obj['baseurl'] = baseurlImg
obj['updateTime'] = datetime.datetime.now()
obj['rate'] = 1.1
obj['showType'] = 3
obj['channel'] = 'porn_sex'
objs.append(obj)
return objs
def update(self, url, ops, channel, i):
objs = self.fetchImgItemsData(url, channel)
print "解析 Img 图片ok----channl=", channel, ' 页数=', i, " 数量=", len(objs)
for obj in objs:
try:
ops.inertImgItems(obj)
for picItem in obj['picList']:
item = {}
item['itemUrl'] = obj['url']
item['picUrl'] = picItem
ops.inertImgItems_item(item)
except Exception as e:
print common.format_exception(e)
return len(objs)
def fetchDataHead(self, url):
try:
soup = self.fetchUrl(baseurlImg+url)
div = soup.first("div", {"class": "container"})
if div != None:
return div.findAll('a')
return []
except Exception as e:
print common.format_exception(e)
def fetchImgItemsData(self, url, channel):
try:
lis = self.fetchDataHead(url)
print url, ";itemsLen=", len(lis)
objs = []
sortType = dateutil.y_m_d()
for item in lis:
obj = {}
obj['name'] = item.first("div",{"class":"float-left"}).text
print obj['name']
obj['url'] = item.get('href')
obj['fileDate'] = item.first("div",{"class":"float-right"}).text
obj['baseurl'] = baseurlImg
obj['channel'] = channel
obj['updateTime'] = datetime.datetime.now()
pics = self.fetchImgs(item.get('href'))
if len(pics) == 0:
                    print 'No image files--', item, '---', url
continue
obj['picList'] = pics
obj['pics'] = len(pics)
obj['sortType'] = sortType
obj['showType'] = 3
                print 'url=', obj['url'], 'filedate=', obj['fileDate'], ' image count=', len(pics)
objs.append(obj)
return objs
except Exception as e:
print common.format_exception(e)
def fetchImgs(self, url):
soup = self.fetchUrl(baseurlImg+url)
picData = soup.first("div", {"class": "imgList"})
picList = picData.findAll("img")
pics = []
for item in picList:
pics.append(item.get('data-original'))
return pics
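
# Usage sketch (assumption, based on the Thread subclass above):
#   ImgParse().start()  # crawls every channel parsed from header3.html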
| [
"[email protected]"
] | |
54a4f16e087a850dd1d3dd642d82d5f350eae00e | de28d64694919a861760fa8db8ff5ff781c9e4e3 | /wild_card_matching.py | 653f701bd6d307a858b2dc5d5dc736c7214b34a3 | [] | no_license | pramitsawant/interview_prep_python | 2202ac6bd7574316885f6067a8c6ac98fc5e88af | 7a3dad53b954f874995bafdbfd5677959f8b8de7 | refs/heads/master | 2020-09-25T06:34:26.903728 | 2016-12-12T13:59:46 | 2016-12-12T13:59:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | # coding=utf-8
'''
'?' Matches any single character.
'*' Matches any sequence of characters (including the empty sequence).
The matching should cover the entire input string (not partial).
The function prototype should be:
bool isMatch(const char *s, const char *p)
Some examples:
isMatch("aa","a") → false
isMatch("aa","aa") → true
isMatch("aaa","aa") → false
isMatch("aa", "*") → true
isMatch("aa", "a*") → true
isMatch("ab", "?*") → true
isMatch("aab", "c*a*b") → false
'''
def wild_card(word, pattern, word_index, pattern_index):
    # Pattern exhausted: match only if the word is exhausted too.
    if pattern_index == len(pattern):
        return word_index == len(word)
    if pattern[pattern_index] == '*':
        # '*' matches the empty sequence, or absorbs one more character of word.
        # (The original run-skipping loop here was buggy and failed the
        # docstring cases; this is the standard recursion.)
        if wild_card(word, pattern, word_index, pattern_index + 1):
            return True
        return word_index < len(word) and wild_card(word, pattern, word_index + 1, pattern_index)
    if word_index == len(word):
        return False
    if pattern[pattern_index] == '?' or word[word_index] == pattern[pattern_index]:
        # '?' matches any single character; otherwise characters must be equal.
        return wild_card(word, pattern, word_index + 1, pattern_index + 1)
    return False
print wild_card('aaaab', '*?', 0, 0)
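# Extra checks against the docstring examples (added for illustration):
print wild_card('aa', 'a', 0, 0)       # False
print wild_card('aa', '*', 0, 0)       # True
print wild_card('aab', 'c*a*b', 0, 0)  # False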
| [
"[email protected]"
] | |
8f4bb724abec5c2be28133f11fadc5f0306fc94f | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/fv/from.py | 59ac01b9a307e3cca82f10539aba98acaab784ca | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,246 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class From(Mo):
meta = ClassMeta("cobra.model.fv.From")
meta.isAbstract = True
meta.moClassName = "fvFrom"
meta.moClassName = "fvFrom"
meta.rnFormat = ""
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "None"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x0
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.concreteSubClasses.add("cobra.model.nw.RtL3EpDefToPathEp")
meta.concreteSubClasses.add("cobra.model.mgmt.RtRtdMgmtConf")
meta.concreteSubClasses.add("cobra.model.pc.RtAccBndlGrpToAggrIf")
meta.concreteSubClasses.add("cobra.model.fv.RtIpEppAtt")
meta.concreteSubClasses.add("cobra.model.l3ext.RtAddrToIpDef")
meta.concreteSubClasses.add("cobra.model.cons.RtConsRoot")
meta.concreteSubClasses.add("cobra.model.l3.RtIpCktEpIfConn")
meta.concreteSubClasses.add("cobra.model.l2.RtSpanSrcToL2CktEpAtt")
meta.concreteSubClasses.add("cobra.model.l1.RtEncPhysRtdConf")
meta.concreteSubClasses.add("cobra.model.svccopy.RtToCopyDestGrp")
meta.concreteSubClasses.add("cobra.model.dhcp.RtRelayAddrToProv")
meta.concreteSubClasses.add("cobra.model.fv.RtQinqEppAtt")
meta.concreteSubClasses.add("cobra.model.pc.RtFcAccBndlGrpToFcAggrIf")
meta.concreteSubClasses.add("cobra.model.l1.RtBrConf")
meta.concreteSubClasses.add("cobra.model.rtextcom.RtExtCommAtt")
meta.concreteSubClasses.add("cobra.model.qosp.RtDot1pRuleAtt")
meta.concreteSubClasses.add("cobra.model.qosp.RtL3dscpRuleAtt")
meta.concreteSubClasses.add("cobra.model.fv.RtLbIfToLocale")
meta.concreteSubClasses.add("cobra.model.leqpt.RtEpDefToLooseNode")
meta.concreteSubClasses.add("cobra.model.fv.RtIpAddr")
meta.concreteSubClasses.add("cobra.model.l1.RtPhysRtdConf")
meta.concreteSubClasses.add("cobra.model.svcredir.RtHealthGrpAtt")
meta.concreteSubClasses.add("cobra.model.l3.RtEPgDefToL3Dom")
meta.concreteSubClasses.add("cobra.model.nw.RtVsanLabelAtt")
meta.concreteSubClasses.add("cobra.model.l1.RtIoPPhysConf")
meta.concreteSubClasses.add("cobra.model.fv.RtCtxToEpP")
meta.concreteSubClasses.add("cobra.model.fv.RtMacEppAtt")
meta.concreteSubClasses.add("cobra.model.l1.RtToObservedEthIf")
meta.concreteSubClasses.add("cobra.model.pc.RtPcFcAggrBrConf")
meta.concreteSubClasses.add("cobra.model.nw.RtVsanPathAtt")
meta.concreteSubClasses.add("cobra.model.fv.RtVlanEppAtt")
meta.concreteSubClasses.add("cobra.model.fv.RtVxlanEppAtt")
meta.concreteSubClasses.add("cobra.model.nw.RtEpDefToPathEp")
meta.concreteSubClasses.add("cobra.model.ip.RtRtDefIpAddr")
meta.concreteSubClasses.add("cobra.model.l2.RtDomIfConn")
meta.concreteSubClasses.add("cobra.model.pc.RtVpcConf")
meta.concreteSubClasses.add("cobra.model.pc.RtFexBndlGrpToAggrIf")
meta.concreteSubClasses.add("cobra.model.rtregcom.RtRegCommAtt")
meta.concreteSubClasses.add("cobra.model.rtpfx.RtRtNhAtt")
meta.concreteSubClasses.add("cobra.model.nw.RtPathDomAtt")
meta.concreteSubClasses.add("cobra.model.fv.RtExtBD")
meta.concreteSubClasses.add("cobra.model.l2.RtSrvExtIfMap")
meta.concreteSubClasses.add("cobra.model.qosp.RtL3dot1pRuleAtt")
meta.concreteSubClasses.add("cobra.model.l1.RtExtConf")
meta.concreteSubClasses.add("cobra.model.qosp.RtDscpRuleAtt")
meta.concreteSubClasses.add("cobra.model.l1.RtFcBrConf")
meta.concreteSubClasses.add("cobra.model.fv.RtToTunDef")
meta.concreteSubClasses.add("cobra.model.analytics.RtMonitorAtt")
meta.concreteSubClasses.add("cobra.model.fv.RtL3If")
meta.concreteSubClasses.add("cobra.model.fv.RtRouteToIfConn")
meta.concreteSubClasses.add("cobra.model.fv.RtToEpgProt")
meta.concreteSubClasses.add("cobra.model.l1.RtLocaleToObservedEthIf")
meta.concreteSubClasses.add("cobra.model.rtpfx.RtRtDstAtt")
meta.concreteSubClasses.add("cobra.model.l2.RtEPgDefToL2Dom")
meta.concreteSubClasses.add("cobra.model.ip.RtRouteToRouteDef")
meta.concreteSubClasses.add("cobra.model.svcredir.RtToRedirDestGrp")
meta.concreteSubClasses.add("cobra.model.fv.RtMacBaseEppAtt")
meta.concreteSubClasses.add("cobra.model.nw.RtEpDefRefToPathEp")
meta.concreteSubClasses.add("cobra.model.svccopy.RtCopyDestAtt")
meta.concreteSubClasses.add("cobra.model.rtpfx.RtRtSrcAtt")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 101, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
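
# Illustrative note (not part of the generated file): meta.isAbstract is True,
# so cobra.model.fv.From is never instantiated directly; the cobra SDK
# materializes one of the concrete subclasses registered above (e.g.
# cobra.model.fv.RtIpEppAtt) when such a relation exists in the MIT.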
# End of package file
# ##################################################
| [
"[email protected]"
] | |
d3920561c0ee2d24d702c2e5d3ce5eb4d555dbd3 | b94ab99f9c1f8bbb99afd23e1bfcd2332060b4bd | /run.py | 4e3d7e4f1400e503e81bde7fd29bd036b083e6d3 | [] | no_license | georgecai904/bookshelf | e54ccae00d4ee48e91ca1564a425ba4586b52d93 | 0002207dc8ca586ce1127d3ea98bb53102d043df | refs/heads/master | 2021-01-02T22:52:26.046535 | 2017-08-05T15:32:13 | 2017-08-05T15:32:13 | 99,409,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | import time
def run_bash_command(command):
    import subprocess
    # Run the command, capture stdout, and print the (stdout, stderr) tuple
    # returned by communicate() once the process finishes.
    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    print(process.communicate())
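# Trigger django-cron's job runner once a minute.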
while True:
run_bash_command("python manage.py runcrons")
time.sleep(60) | [
"[email protected]"
] | |
09ab405b4618a48b3621a5259a4853a7ee190e10 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/HPN-ICF-MULTICAST-SNOOPING-MIB.py | f19c2866685a9ddc231b0cb4568dcf6412d89090 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 44,155 | py | #
# PySNMP MIB module HPN-ICF-MULTICAST-SNOOPING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-MULTICAST-SNOOPING-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:40:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
hpnicfCommon, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfCommon")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Bits, Integer32, TimeTicks, Gauge32, ObjectIdentity, Unsigned32, Counter64, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, ModuleIdentity, NotificationType, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Integer32", "TimeTicks", "Gauge32", "ObjectIdentity", "Unsigned32", "Counter64", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "ModuleIdentity", "NotificationType", "Counter32")
DisplayString, TruthValue, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "TextualConvention", "RowStatus")
hpnicfMulticastSnoop = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123))
hpnicfMulticastSnoop.setRevisions(('2014-05-14 17:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hpnicfMulticastSnoop.setRevisionsDescriptions(('The initial version of this MIB file.',))
if mibBuilder.loadTexts: hpnicfMulticastSnoop.setLastUpdated('201405141700Z')
if mibBuilder.loadTexts: hpnicfMulticastSnoop.setOrganization('')
if mibBuilder.loadTexts: hpnicfMulticastSnoop.setContactInfo('')
if mibBuilder.loadTexts: hpnicfMulticastSnoop.setDescription('IGMP/MLD Snooping Management MIB')
class HpnicfVirtualUnitType(TextualConvention, Integer32):
description = 'VLAN configuration or VSI configuration.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("vlan", 1), ("vsi", 2))
hpnicfMulticastSnoopObject = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1))
hpnicfMcsGlobalConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 1), )
if mibBuilder.loadTexts: hpnicfMcsGlobalConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsGlobalConfigTable.setDescription('Table containing information about the global configuration of IGMP/MLD snooping.')
hpnicfMcsGlobalConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 1, 1), ).setIndexNames((0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsGlbSnoopingType"))
if mibBuilder.loadTexts: hpnicfMcsGlobalConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsGlobalConfigEntry.setDescription('Entry containing information about the global configuration of IGMP/MLD snooping.')
hpnicfMcsGlbSnoopingType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 1, 1, 1), InetAddressType())
if mibBuilder.loadTexts: hpnicfMcsGlbSnoopingType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsGlbSnoopingType.setDescription('Type of the global configuration. IPv4 means IGMP snooping configuration, and IPv6 means MLD snooping configuration.')
hpnicfMcsGlbRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsGlbRowStatus.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsGlbRowStatus.setDescription('The activation of a row enables IGMP/MLD snooping globally. The destruction of a row disables IGMP/MLD snooping globally.')
hpnicfMcsGlbEntryLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 1, 1, 3), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsGlbEntryLimit.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsGlbEntryLimit.setDescription('Global maximum number of IGMP/MLD snooping forwarding entries.')
hpnicfMcsGlbHostAgingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 8097894)).clone(260)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsGlbHostAgingTime.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsGlbHostAgingTime.setDescription('Global aging time of the multicast group on ports.')
hpnicfMcsGlbMaxResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 3174)).clone(10)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsGlbMaxResponseTime.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsGlbMaxResponseTime.setDescription('Global maximum query response time.')
hpnicfMcsGlbRouterAgingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 8097894)).clone(260)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsGlbRouterAgingTime.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsGlbRouterAgingTime.setDescription('Global aging time of router ports.')
hpnicfMcsGlbLastMemQryInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 25)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsGlbLastMemQryInterval.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsGlbLastMemQryInterval.setDescription('Global last member query interval.')
hpnicfMcsGlbDropUnknownEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 1, 1, 8), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsGlbDropUnknownEnabled.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsGlbDropUnknownEnabled.setDescription('Whether the feature of dropping unknown packets is enabled globally.')
hpnicfMcsVirtualUnitConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2), )
if mibBuilder.loadTexts: hpnicfMcsVirtualUnitConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVirtualUnitConfigTable.setDescription('Table containing configuration information about the specified VLAN or VSI.')
hpnicfMcsVirtualUnitConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1), ).setIndexNames((0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsVUType"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsVUID"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsVUSnoopingType"))
if mibBuilder.loadTexts: hpnicfMcsVirtualUnitConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVirtualUnitConfigEntry.setDescription('Entry containing information of the specified VLAN or VSI.')
hpnicfMcsVUType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 1), HpnicfVirtualUnitType())
if mibBuilder.loadTexts: hpnicfMcsVUType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUType.setDescription('Type of virtual unit.')
hpnicfMcsVUID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 2), Unsigned32())
if mibBuilder.loadTexts: hpnicfMcsVUID.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUID.setDescription('VLAN ID or VSI Index. Its value ranges from 1 to 4094 when type is VLAN, and from 0 to 0xFFFFFFFE when type is VSI')
hpnicfMcsVUSnoopingType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 3), InetAddressType())
if mibBuilder.loadTexts: hpnicfMcsVUSnoopingType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUSnoopingType.setDescription('Type of the configuration. IPv4 means IGMP snooping configuration, and IPv6 means MLD snooping configuration.')
hpnicfMcsVURowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVURowStatus.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVURowStatus.setDescription('The activation of a row enables IGMP/MLD snooping in the VLAN or VSI. The destruction of a row disables IGMP/MLD snooping in the VLAN or VSI.')
hpnicfMcsVUHostAgingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 8097894))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVUHostAgingTime.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUHostAgingTime.setDescription('Aging time of the multicast group on ports in the VLAN or VSI. A value of zero indicates that it is not configured in the VLAN or VSI.')
hpnicfMcsVUMaxResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 3174))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVUMaxResponseTime.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUMaxResponseTime.setDescription('Maximum query response time in the VLAN or VSI. A value of zero indicates that it is not configured in the VLAN or VSI.')
hpnicfMcsVURouterAgingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 8097894))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVURouterAgingTime.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVURouterAgingTime.setDescription('Aging time of the router port in the VLAN or VSI. A value of zero indicates that it is not configured in the VLAN or VSI.')
hpnicfMcsVULastMemQryInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 25))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVULastMemQryInterval.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVULastMemQryInterval.setDescription('Last member query interval in the VLAN or VSI. A value of zero indicates that it is not configured in the VLAN or VSI.')
hpnicfMcsVUDropUnknownEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 9), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVUDropUnknownEnabled.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUDropUnknownEnabled.setDescription('Whether the feature of dropping unknown packets is enabled in the VLAN or VSI.')
hpnicfMcsVUPimSnoopingEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 10), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVUPimSnoopingEnabled.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUPimSnoopingEnabled.setDescription('Whether PIM snooping is enabled in the VLAN or VSI.')
hpnicfMcsVUVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 11), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(2, 2), ValueRangeConstraint(3, 3), )).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVUVersion.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUVersion.setDescription('Version of IGMP/MLD snooping that is running on the VLAN. Value 2 represents IGMPv2 snooping and MLDv1 snooping, and value 3 represents IGMPv3 snooping and MLDv2 snooping.')
hpnicfMcsVUQuerierEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 12), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVUQuerierEnabled.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUQuerierEnabled.setDescription('Whether the querier feature is enabled in the VLAN or VSI.')
hpnicfMcsVUQuerierInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 13), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(2, 31744)).clone(125)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVUQuerierInterval.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUQuerierInterval.setDescription('Query interval.')
hpnicfMcsVUGeneQuerierSourceAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 14), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVUGeneQuerierSourceAddress.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUGeneQuerierSourceAddress.setDescription('Source IP address of IGMP or MLD general query packets. Its value is 255.255.255.255 or FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF when not configured.')
hpnicfMcsVUSpecQuerierSourceAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 15), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVUSpecQuerierSourceAddress.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUSpecQuerierSourceAddress.setDescription('Source IP address of IGMP or MLD group-specific query packets. Its value is 255.255.255.255 or FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF when not configured.')
hpnicfMcsVULeaveSourceAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 16), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVULeaveSourceAddress.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVULeaveSourceAddress.setDescription('Source IP address of IGMP or MLD leave packets. Its value is 255.255.255.255 or FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF when not configured.')
hpnicfMcsVUReportSourceAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 2, 1, 17), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsVUReportSourceAddress.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsVUReportSourceAddress.setDescription('Source IP address of IGMP or MLD report packets. Its value is 255.255.255.255 or FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF when not configured.')
hpnicfMcsL2EntryTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 3), )
if mibBuilder.loadTexts: hpnicfMcsL2EntryTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsL2EntryTable.setDescription('Table containing a list of Layer 2 multicast group entries.')
hpnicfMcsL2EntryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 3, 1), ).setIndexNames((0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsL2EntryVUType"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsL2EntryVUID"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsL2EntryAddressType"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsL2EntryGroupAddress"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsL2EntrySourceAddress"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsL2EntryIfIndex"))
if mibBuilder.loadTexts: hpnicfMcsL2EntryEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsL2EntryEntry.setDescription('Entry of l2-multicast group, which is created for each group learned or configured in the VLAN or VSI.')
hpnicfMcsL2EntryVUType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 3, 1, 1), HpnicfVirtualUnitType())
if mibBuilder.loadTexts: hpnicfMcsL2EntryVUType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsL2EntryVUType.setDescription('Type of virtual unit.')
hpnicfMcsL2EntryVUID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 3, 1, 2), Unsigned32())
if mibBuilder.loadTexts: hpnicfMcsL2EntryVUID.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsL2EntryVUID.setDescription('VLAN ID or VSI Index. Its value ranges from 1 to 4094 when type is VLAN, and from 0 to 0xFFFFFFFE when type is VSI.')
hpnicfMcsL2EntryAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 3, 1, 3), InetAddressType())
if mibBuilder.loadTexts: hpnicfMcsL2EntryAddressType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsL2EntryAddressType.setDescription('Type of multicast IP address.')
hpnicfMcsL2EntryGroupAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 3, 1, 4), InetAddress())
if mibBuilder.loadTexts: hpnicfMcsL2EntryGroupAddress.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsL2EntryGroupAddress.setDescription('IP address of the multicast group which the port joined.')
hpnicfMcsL2EntrySourceAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 3, 1, 5), InetAddress())
if mibBuilder.loadTexts: hpnicfMcsL2EntrySourceAddress.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsL2EntrySourceAddress.setDescription('IP address of the unicast source which the port joined.')
hpnicfMcsL2EntryIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 3, 1, 6), InterfaceIndex())
if mibBuilder.loadTexts: hpnicfMcsL2EntryIfIndex.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsL2EntryIfIndex.setDescription('IfIndex value of the port that joined the Layer 2 IP multicast group entry.')
hpnicfMcsL2EntryPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("interface", 1), ("ac", 2), ("npw", 3), ("upw", 4), ("trill", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsL2EntryPortType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsL2EntryPortType.setDescription('Type of the port.')
hpnicfMcsL2EntryPortAttribute = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 3, 1, 8), Bits().clone(namedValues=NamedValues(("d", 0), ("s", 1), ("p", 2), ("k", 3), ("r", 4), ("w", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsL2EntryPortAttribute.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsL2EntryPortAttribute.setDescription("Attribute of the port. 'd' means learned from IGMP/MLD packets, 's' means configured statically, 'p' means learned from PIM snooping, 'k' means obtained from the kernel, 'r' means learned from (*, *) entries, 'w' means learned from (*, G) entries.")
hpnicfMcsPacketStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4), )
if mibBuilder.loadTexts: hpnicfMcsPacketStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPacketStatisticsTable.setDescription('Table containing the IGMP/MLD packets statistics.')
hpnicfMcsPacketStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1), ).setIndexNames((0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsStatisticsSnoopingType"))
if mibBuilder.loadTexts: hpnicfMcsPacketStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPacketStatisticsEntry.setDescription('Entry containing the statistic information of IGMP/MLD packets that have been transmitted and received in the device.')
hpnicfMcsStatisticsSnoopingType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 1), InetAddressType())
if mibBuilder.loadTexts: hpnicfMcsStatisticsSnoopingType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsStatisticsSnoopingType.setDescription('Type of the snooping, IPv4 means the statistics for IGMP snooping, and IPv6 means the statistics for MLD snooping.')
hpnicfMcsRxGeneryQueryNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxGeneryQueryNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxGeneryQueryNum.setDescription('Statistics of IGMP or MLD general query packets received on the device.')
hpnicfMcsRxV2SpecificQueryNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxV2SpecificQueryNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxV2SpecificQueryNum.setDescription('Statistics of IGMPv2 or MLDv1 group-specific query packets received on the device.')
hpnicfMcsRxV3SpecificQueryNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxV3SpecificQueryNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxV3SpecificQueryNum.setDescription('The statistics of IGMPv3 or MLDv2 group-specific query packets received on the device.')
hpnicfMcsRxV3SpecificSGQueryNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxV3SpecificSGQueryNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxV3SpecificSGQueryNum.setDescription('Statistics of IGMPv3 or MLDv2 group-and-source-specific query packets received on the device.')
hpnicfMcsRxV1ReportNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxV1ReportNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxV1ReportNum.setDescription('Statistics of IGMPv1 report packets received on the device.')
hpnicfMcsRxV2ReportNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxV2ReportNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxV2ReportNum.setDescription('Statistics of IGMPv2 or MLDv1 report packets received on the device.')
hpnicfMcsRxV3ReportNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxV3ReportNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxV3ReportNum.setDescription('Statistics of IGMPv3 or MLDv2 report packets received on the device.')
hpnicfMcsRxV3ErrCorReportNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxV3ErrCorReportNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxV3ErrCorReportNum.setDescription('Statistics of IGMPv3 or MLDv2 report packets with correct and incorrect records received on the device.')
hpnicfMcsRxLeaveNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxLeaveNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxLeaveNum.setDescription('Statistics of leave packets received on the device.')
hpnicfMcsRxPimHelloNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxPimHelloNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxPimHelloNum.setDescription('Statistics of PIM hello packets received on the device.')
hpnicfMcsRxErrorPacketNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsRxErrorPacketNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRxErrorPacketNum.setDescription('Statistics of error IGMP/MLD packets received on the device.')
hpnicfMcsTxV2SpecificQueryNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsTxV2SpecificQueryNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsTxV2SpecificQueryNum.setDescription('Statistics of IGMPv2 or MLDv1 group-specific query packets sent from the device.')
hpnicfMcsTxV3SpecificQueryNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsTxV3SpecificQueryNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsTxV3SpecificQueryNum.setDescription('Statistics of IGMPv3 or MLDv2 group-specific query packets sent from the device.')
hpnicfMcsTxV3SpecificSGQueryNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 4, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfMcsTxV3SpecificSGQueryNum.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsTxV3SpecificSGQueryNum.setDescription('Statistics of IGMPv3 or MLDv2 group-and-source-specific query packets sent from the device.')
hpnicfMcsPortJoinGroupConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 5), )
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupConfigTable.setDescription('Table for configuring a port as a simulated member host for a multicast group.')
hpnicfMcsPortJoinGroupConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 5, 1), ).setIndexNames((0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortJoinGroupIfIndex"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortJoinGroupSnoopingType"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortJoinGroupVlanID"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortJoinGroupGroupAddress"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortJoinGroupSourceAddress"))
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupConfigEntry.setDescription('Entry for configuring a port as a simulated member host for a multicast group.')
hpnicfMcsPortJoinGroupIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 5, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupIfIndex.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupIfIndex.setDescription('Port for which this entry contains information.')
hpnicfMcsPortJoinGroupSnoopingType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 5, 1, 2), InetAddressType())
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupSnoopingType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupSnoopingType.setDescription('Type of the configuration. IPv4 means IGMP snooping configuration, and IPv6 means MLD snooping configuration.')
hpnicfMcsPortJoinGroupVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 5, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094)))
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupVlanID.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupVlanID.setDescription('Index uniquely identifying the specified VLAN in which a host on a port joined the multicast group.')
hpnicfMcsPortJoinGroupGroupAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 5, 1, 4), InetAddress())
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupGroupAddress.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupGroupAddress.setDescription('IP address of the group to which the host belongs.')
hpnicfMcsPortJoinGroupSourceAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 5, 1, 5), InetAddress())
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupSourceAddress.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupSourceAddress.setDescription('IP address of the source. A value of zero indicates that the multicast packets of this group can come from any sources.')
hpnicfMcsPortJoinGroupStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 5, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupStatus.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortJoinGroupStatus.setDescription("This object is responsible for managing rows, which supports 'active', 'createAndGo' and 'destroy'.")
hpnicfMcsPortStaticGroupConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 6), )
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupConfigTable.setDescription('Table for configuring static group membership entries on a port.')
hpnicfMcsPortStaticGroupConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 6, 1), ).setIndexNames((0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortStaticGroupIfIndex"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortStaticGroupSnoopingType"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortStaticGroupVlanID"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortStaticGroupGroupAddress"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortStaticGroupSourceAddress"))
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupConfigEntry.setDescription('Entry for configuring static group membership entries on a port.')
hpnicfMcsPortStaticGroupIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 6, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupIfIndex.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupIfIndex.setDescription('Port for which this entry contains information.')
hpnicfMcsPortStaticGroupSnoopingType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 6, 1, 2), InetAddressType())
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupSnoopingType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupSnoopingType.setDescription('Type of the configuration. IPv4 means IGMP snooping configuration, and IPv6 means MLD snooping configuration.')
hpnicfMcsPortStaticGroupVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 6, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094)))
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupVlanID.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupVlanID.setDescription('Index uniquely identifying the specified VLAN in which a port statically joined the multicast group.')
hpnicfMcsPortStaticGroupGroupAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 6, 1, 4), InetAddress())
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupGroupAddress.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupGroupAddress.setDescription('IP address of the multicast group.')
hpnicfMcsPortStaticGroupSourceAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 6, 1, 5), InetAddress())
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupSourceAddress.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupSourceAddress.setDescription('IP address of the source. A value of zero indicates that the multicast packets of this group can come from any sources.')
hpnicfMcsPortStaticGroupStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 6, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupStatus.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortStaticGroupStatus.setDescription("This object is responsible for managing the creation and deletion of rows, which supports 'active', 'createAndGo' and 'destroy'.")
hpnicfMcsRouterPortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 7), )
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigTable.setDescription('Table for configuring a port as a static router port.')
hpnicfMcsRouterPortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 7, 1), ).setIndexNames((0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsRouterPortConfigIfIndex"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsRouterPortConfigSnoopingType"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsRouterPortConfigVlanID"))
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigEntry.setDescription('Entry for configuring a port as a static router port.')
hpnicfMcsRouterPortConfigIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 7, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigIfIndex.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigIfIndex.setDescription('Port for which this entry contains information.')
hpnicfMcsRouterPortConfigSnoopingType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 7, 1, 2), InetAddressType())
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigSnoopingType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigSnoopingType.setDescription('Type of the configuration. IPv4 means IGMP snooping configuration, and IPv6 means MLD snooping configuration.')
hpnicfMcsRouterPortConfigVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 7, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094)))
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigVlanID.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigVlanID.setDescription('Index uniquely identifying the specified VLAN in which a port acts as a static router port.')
hpnicfMcsRouterPortConfigRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 7, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigRowStatus.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsRouterPortConfigRowStatus.setDescription("This object is a conceptual row entry that allows adding entries to or deleting entries from the hpnicfMcsRouterPortConfigTable. When an entry is created in this table, the 'createAndGo' method is used and the value of this object is set to 'active'. Deactivation of an 'active' entry is not allowed. When an entry is deleted from this table, the 'destroy' method is used.")
hpnicfMcsPortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 8), )
if mibBuilder.loadTexts: hpnicfMcsPortConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortConfigTable.setDescription('Table for configuring the fast leave status, group limit number, group policy parameter and overflow replace status on a port in the specified VLAN.')
hpnicfMcsPortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 8, 1), ).setIndexNames((0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortConfigIfIndex"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortConfigSnoopingType"), (0, "HPN-ICF-MULTICAST-SNOOPING-MIB", "hpnicfMcsPortConfigVlanID"))
if mibBuilder.loadTexts: hpnicfMcsPortConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortConfigEntry.setDescription('Entry containing information about the fast leave status, group limit number, group policy parameter and overflow replace status of a port in the specified VLAN.')
hpnicfMcsPortConfigIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 8, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpnicfMcsPortConfigIfIndex.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortConfigIfIndex.setDescription('Port for which this entry contains information.')
hpnicfMcsPortConfigSnoopingType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 8, 1, 2), InetAddressType())
if mibBuilder.loadTexts: hpnicfMcsPortConfigSnoopingType.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortConfigSnoopingType.setDescription('Type of the configuration. IPv4 means IGMP snooping configuration, and IPv6 means MLD snooping configuration.')
hpnicfMcsPortConfigVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 8, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094)))
if mibBuilder.loadTexts: hpnicfMcsPortConfigVlanID.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortConfigVlanID.setDescription('VLAN ID.')
hpnicfMcsPortConfigGroupLimitNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 8, 1, 4), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsPortConfigGroupLimitNumber.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortConfigGroupLimitNumber.setDescription('Group limit number of the port.')
hpnicfMcsPortConfigFastLeaveStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 8, 1, 5), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsPortConfigFastLeaveStatus.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortConfigFastLeaveStatus.setDescription('Fast leave status of the port.')
hpnicfMcsPortConfigGroupPolicyParameter = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 8, 1, 6), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(2000, 3999), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsPortConfigGroupPolicyParameter.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortConfigGroupPolicyParameter.setDescription('ACL number which is used as the group policy parameter of the port.')
hpnicfMcsPortConfigOverflowReplace = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 8, 1, 7), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsPortConfigOverflowReplace.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortConfigOverflowReplace.setDescription('This object is related to the object hpnicfMcsPortConfigGroupLimitNumber. If the current group number is less than the value of hpnicfMcsPortConfigGroupLimitNumber, any new group is permitted. If the current group number equals the value of hpnicfMcsPortConfigGroupLimitNumber and the value of this object is enabled, the group with the minimum multicast address will be replaced by the new group. If the current group number equals the value of hpnicfMcsPortConfigGroupLimitNumber and the value of this object is disabled, no new group will be permitted.')
hpnicfMcsPortConfigRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 123, 1, 8, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfMcsPortConfigRowStatus.setStatus('current')
if mibBuilder.loadTexts: hpnicfMcsPortConfigRowStatus.setDescription("The object is responsible for managing the creation and deletion of rows, which supports 'active', 'createAndGo' and 'destroy'.")
mibBuilder.exportSymbols("HPN-ICF-MULTICAST-SNOOPING-MIB", hpnicfMcsVUVersion=hpnicfMcsVUVersion, hpnicfMcsRouterPortConfigIfIndex=hpnicfMcsRouterPortConfigIfIndex, hpnicfMcsPortJoinGroupStatus=hpnicfMcsPortJoinGroupStatus, hpnicfMcsPortConfigEntry=hpnicfMcsPortConfigEntry, hpnicfMcsPortStaticGroupConfigTable=hpnicfMcsPortStaticGroupConfigTable, hpnicfMcsTxV3SpecificQueryNum=hpnicfMcsTxV3SpecificQueryNum, hpnicfMcsPortConfigVlanID=hpnicfMcsPortConfigVlanID, hpnicfMcsGlobalConfigTable=hpnicfMcsGlobalConfigTable, hpnicfMcsGlbDropUnknownEnabled=hpnicfMcsGlbDropUnknownEnabled, hpnicfMcsStatisticsSnoopingType=hpnicfMcsStatisticsSnoopingType, hpnicfMcsPortStaticGroupConfigEntry=hpnicfMcsPortStaticGroupConfigEntry, hpnicfMcsPacketStatisticsTable=hpnicfMcsPacketStatisticsTable, hpnicfMcsRxV3ReportNum=hpnicfMcsRxV3ReportNum, hpnicfMcsPortStaticGroupStatus=hpnicfMcsPortStaticGroupStatus, hpnicfMcsGlbRowStatus=hpnicfMcsGlbRowStatus, hpnicfMcsL2EntrySourceAddress=hpnicfMcsL2EntrySourceAddress, hpnicfMcsL2EntryIfIndex=hpnicfMcsL2EntryIfIndex, hpnicfMcsPacketStatisticsEntry=hpnicfMcsPacketStatisticsEntry, hpnicfMcsPortJoinGroupConfigTable=hpnicfMcsPortJoinGroupConfigTable, hpnicfMcsPortJoinGroupSourceAddress=hpnicfMcsPortJoinGroupSourceAddress, hpnicfMcsL2EntryPortType=hpnicfMcsL2EntryPortType, hpnicfMcsVUHostAgingTime=hpnicfMcsVUHostAgingTime, hpnicfMcsGlobalConfigEntry=hpnicfMcsGlobalConfigEntry, hpnicfMcsRouterPortConfigSnoopingType=hpnicfMcsRouterPortConfigSnoopingType, hpnicfMcsRxGeneryQueryNum=hpnicfMcsRxGeneryQueryNum, hpnicfMcsGlbHostAgingTime=hpnicfMcsGlbHostAgingTime, hpnicfMcsVUPimSnoopingEnabled=hpnicfMcsVUPimSnoopingEnabled, hpnicfMcsVUSnoopingType=hpnicfMcsVUSnoopingType, hpnicfMcsGlbLastMemQryInterval=hpnicfMcsGlbLastMemQryInterval, hpnicfMcsPortConfigRowStatus=hpnicfMcsPortConfigRowStatus, hpnicfMcsVUSpecQuerierSourceAddress=hpnicfMcsVUSpecQuerierSourceAddress, hpnicfMcsVUDropUnknownEnabled=hpnicfMcsVUDropUnknownEnabled, hpnicfMcsVULeaveSourceAddress=hpnicfMcsVULeaveSourceAddress, hpnicfMcsL2EntryGroupAddress=hpnicfMcsL2EntryGroupAddress, hpnicfMcsPortConfigGroupPolicyParameter=hpnicfMcsPortConfigGroupPolicyParameter, hpnicfMcsVULastMemQryInterval=hpnicfMcsVULastMemQryInterval, hpnicfMcsPortStaticGroupIfIndex=hpnicfMcsPortStaticGroupIfIndex, hpnicfMcsRouterPortConfigVlanID=hpnicfMcsRouterPortConfigVlanID, hpnicfMcsGlbRouterAgingTime=hpnicfMcsGlbRouterAgingTime, hpnicfMcsL2EntryPortAttribute=hpnicfMcsL2EntryPortAttribute, hpnicfMcsRouterPortConfigTable=hpnicfMcsRouterPortConfigTable, hpnicfMcsTxV2SpecificQueryNum=hpnicfMcsTxV2SpecificQueryNum, hpnicfMcsPortStaticGroupGroupAddress=hpnicfMcsPortStaticGroupGroupAddress, hpnicfMcsPortConfigTable=hpnicfMcsPortConfigTable, hpnicfMcsPortJoinGroupGroupAddress=hpnicfMcsPortJoinGroupGroupAddress, hpnicfMcsRxV3ErrCorReportNum=hpnicfMcsRxV3ErrCorReportNum, hpnicfMcsRouterPortConfigRowStatus=hpnicfMcsRouterPortConfigRowStatus, hpnicfMcsVUMaxResponseTime=hpnicfMcsVUMaxResponseTime, hpnicfMcsVUQuerierEnabled=hpnicfMcsVUQuerierEnabled, hpnicfMcsVirtualUnitConfigTable=hpnicfMcsVirtualUnitConfigTable, hpnicfMcsPortStaticGroupSnoopingType=hpnicfMcsPortStaticGroupSnoopingType, hpnicfMcsRxLeaveNum=hpnicfMcsRxLeaveNum, hpnicfMcsL2EntryEntry=hpnicfMcsL2EntryEntry, hpnicfMcsL2EntryVUID=hpnicfMcsL2EntryVUID, hpnicfMcsL2EntryTable=hpnicfMcsL2EntryTable, hpnicfMcsVUGeneQuerierSourceAddress=hpnicfMcsVUGeneQuerierSourceAddress, hpnicfMcsGlbEntryLimit=hpnicfMcsGlbEntryLimit, hpnicfMcsRxV3SpecificQueryNum=hpnicfMcsRxV3SpecificQueryNum, 
hpnicfMcsGlbMaxResponseTime=hpnicfMcsGlbMaxResponseTime, hpnicfMcsPortJoinGroupVlanID=hpnicfMcsPortJoinGroupVlanID, hpnicfMcsGlbSnoopingType=hpnicfMcsGlbSnoopingType, hpnicfMcsRxV2ReportNum=hpnicfMcsRxV2ReportNum, hpnicfMcsVirtualUnitConfigEntry=hpnicfMcsVirtualUnitConfigEntry, hpnicfMcsPortJoinGroupSnoopingType=hpnicfMcsPortJoinGroupSnoopingType, hpnicfMulticastSnoopObject=hpnicfMulticastSnoopObject, hpnicfMcsRxV2SpecificQueryNum=hpnicfMcsRxV2SpecificQueryNum, hpnicfMcsRouterPortConfigEntry=hpnicfMcsRouterPortConfigEntry, PYSNMP_MODULE_ID=hpnicfMulticastSnoop, HpnicfVirtualUnitType=HpnicfVirtualUnitType, hpnicfMcsTxV3SpecificSGQueryNum=hpnicfMcsTxV3SpecificSGQueryNum, hpnicfMcsPortStaticGroupSourceAddress=hpnicfMcsPortStaticGroupSourceAddress, hpnicfMcsVUQuerierInterval=hpnicfMcsVUQuerierInterval, hpnicfMcsVURouterAgingTime=hpnicfMcsVURouterAgingTime, hpnicfMcsPortConfigIfIndex=hpnicfMcsPortConfigIfIndex, hpnicfMcsVUReportSourceAddress=hpnicfMcsVUReportSourceAddress, hpnicfMcsPortConfigSnoopingType=hpnicfMcsPortConfigSnoopingType, hpnicfMcsRxErrorPacketNum=hpnicfMcsRxErrorPacketNum, hpnicfMulticastSnoop=hpnicfMulticastSnoop, hpnicfMcsRxV3SpecificSGQueryNum=hpnicfMcsRxV3SpecificSGQueryNum, hpnicfMcsRxV1ReportNum=hpnicfMcsRxV1ReportNum, hpnicfMcsPortJoinGroupConfigEntry=hpnicfMcsPortJoinGroupConfigEntry, hpnicfMcsL2EntryVUType=hpnicfMcsL2EntryVUType, hpnicfMcsVURowStatus=hpnicfMcsVURowStatus, hpnicfMcsVUID=hpnicfMcsVUID, hpnicfMcsPortConfigGroupLimitNumber=hpnicfMcsPortConfigGroupLimitNumber, hpnicfMcsPortJoinGroupIfIndex=hpnicfMcsPortJoinGroupIfIndex, hpnicfMcsPortConfigFastLeaveStatus=hpnicfMcsPortConfigFastLeaveStatus, hpnicfMcsL2EntryAddressType=hpnicfMcsL2EntryAddressType, hpnicfMcsPortConfigOverflowReplace=hpnicfMcsPortConfigOverflowReplace, hpnicfMcsVUType=hpnicfMcsVUType, hpnicfMcsPortStaticGroupVlanID=hpnicfMcsPortStaticGroupVlanID, hpnicfMcsRxPimHelloNum=hpnicfMcsRxPimHelloNum)
| [
"[email protected]"
] | |
3cd0cb1f539962e82c9f27b78dcb28ab1cbaeb63 | 5821e83ccdd97c7d4d472b6d854490950854ba9f | /code/pike_ex1.py | 7de4a0dee8d596e0ba3e94f7e626d74c313856d9 | [
"CC-BY-4.0"
] | permissive | dhellmann/presentation-regex-implementations | b44b6b0a2ed6304c1c7f63c8702d23f7aa54cdb2 | c02b74651876b596201c59b0d35758ef36298081 | refs/heads/master | 2022-12-15T20:21:24.953111 | 2019-11-03T20:34:06 | 2019-11-03T20:34:06 | 219,158,579 | 1 | 0 | null | 2022-12-11T11:47:57 | 2019-11-02T13:35:33 | CSS | UTF-8 | Python | false | false | 65 | py | #!/usr/bin/env python3
import pike
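# pike: presumably the Pike-VM regex module that accompanies this presentation's code examples.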
pike.match('ab*c', 'abccd')
| [
"[email protected]"
] | |
757ea65c43bbe012863e19534632fc725f47f4b1 | 1ec6d8691fa9355a53fbcf93d3a47b4a15a80fe6 | /build/lib/lcopt/__init__.py | 2ad3293a32502e9c30c5fc0a521d5a5c76c8588a | [
"BSD-3-Clause"
] | permissive | cmutel/lcopt | f3be8bbd0808323de807c56a8a501993d2e7fd8b | 4c9087a5812f0d86804cdda999ef3bdf491a6fa8 | refs/heads/master | 2021-01-22T14:45:51.072282 | 2017-08-18T11:33:08 | 2017-08-18T11:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | from lcopt.io import *
from lcopt.model import * | [
"[email protected]"
] | |
c9ea21dbe986fc835e4e862c46e843454f7a3906 | 1c78b144662c5d0473e53ff5622e6cbf8c593ef9 | /nba_matchup/yfs.py | 6f89a3e678a6563409b64ce31519d6b5e36ce0aa | [
"MIT"
] | permissive | sharadmv/nba-fantasy-optimize | fe1eb45df26a8c23b47de1ff5ad98ada877dfd77 | 81fdfa78fce4ce356220c91a1063a6efcfa4ff02 | refs/heads/master | 2022-12-13T10:34:51.756539 | 2021-11-23T05:54:12 | 2021-11-23T05:54:12 | 168,676,401 | 4 | 0 | MIT | 2022-12-08T01:44:37 | 2019-02-01T09:48:53 | Python | UTF-8 | Python | false | false | 774 | py | from yaspin import yaspin
import datetime
from yahoo_oauth import OAuth2
from fantasy_sport import FantasySport
__all__ = ['yfs', 'LEAGUE_KEY', 'CURRENT_WEEK', 'START_DATE']
LEAGUE_KEY = "nba.l.64384"
oauth = OAuth2(None, None, from_file='oauth.json', base_url='https://fantasysports.yahooapis.com/fantasy/v2/')
yfs = FantasySport(oauth, fmt='json')
with yaspin(text="Fetching league data", color='cyan'):
response = yfs.get_leagues([LEAGUE_KEY]).json()['fantasy_content']['leagues']['0']['league'][0]
START_DATE = datetime.datetime.strptime(response['start_date'], "%Y-%m-%d").date()
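# Roll the league start date back to the Monday of that week (weekday() == 0) so week arithmetic lines up.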
while START_DATE.weekday() != 0:
START_DATE -= datetime.timedelta(days=1)
diff = datetime.datetime.today().date() - START_DATE
CURRENT_WEEK = response.get('current_week', None)
| [
"[email protected]"
] | |
5c48e7effb0eb65a92e95ec6ab09a44a7f7f028d | 192dec1ea734fd67a3c3720228826cf754b2da5a | /valeo/vr/models.py | a1a86f3d91add9b73c1e35fc55c2fc8182a5293b | [] | no_license | fafaschiavo/cpi_valeo | a4df4e64161e58e44ade276f0b6284abfb5af6d2 | 777ef6173bbc4bf5941098cb2ea3b13fccf490c1 | refs/heads/master | 2020-04-06T04:14:59.226013 | 2017-05-02T22:39:00 | 2017-05-02T22:39:00 | 82,980,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class joystick(models.Model):
joystick_data = models.CharField(max_length=200)
angle = models.IntegerField(default=90)
pedal = models.FloatField(default=0)
left_buttons = models.IntegerField(default=0)
right_buttons = models.IntegerField(default=0)
# state = joystick(joystick_data = '', angle = 90, pedal = 0, left_buttons = 0, right_buttons = 0)
# state.save()
# joystick.objects.get(id = 1) | [
"[email protected]"
] | |
f6dba0a7e196a8718ed2fb1a7978fd42953ee6e8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_322/ch30_2019_03_26_18_43_32_991323.py | 901b0a5a75d9547c3b7aa65aa4eecdd7ec4a9796 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | import math
velocidade = int(input("Qual a velocidade?:"))
angulo = int(input("Qual o angulo?:"))
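# Projectile range on level ground: R = v**2 * sin(2*theta) / g, with g = 9.8 m/s**2.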
distancia = (velocidade ** 2) * math.sin(2 * math.radians(angulo)) / 9.8
if 98 <= distancia <= 102:
    print('Acertou!')
elif distancia < 98:
    print('Muito perto')
else:
    print('Muito longe')
"[email protected]"
] | |
e8fedba445c7d316d56ebdb5ed0292e721f568e9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/162/61510/submittedfiles/testes.py | fdf544d79486196cce5adcbccad32091483213bf | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import numpy as np
def menorlinha(a):
for i in range(0,a.shape[0],1):
for j in range(0,a.shape[1],1):
if a[i,j]==1:
return(i)
def menorcoluna(a):
for j in range(0,a.shape[1],1):
for i in range(0,a.shape[0],1):
if a[i,j]==1:
return(j)
def maiorlinha(a):
for i in range(a.shape[0]-1,-1,-1):
for j in range(a.shape[1]-1,-1,-1):
if a[i,j]==1:
return(i)
def maiorcoluna(a):
    for j in range(a.shape[1]-1,-1,-1):
        for i in range(a.shape[0]-1,-1,-1):
            if a[i,j]==1:
                return(j)
linhas=int(input('linhas: '))
colunas=int(input('colunas: '))
a=np.zeros((linhas,colunas))
for i in range(0,a.shape[0],1):
for j in range(0,a.shape[1],1):
a[i,j]=int(input('digite a matriz'))
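# Crop the matrix to the bounding box of its 1-entries.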
x=menorlinha(a)
y=maiorlinha(a)
z=menorcoluna(a)
w=maiorcoluna(a)
print(a[x:y+1,z:w+1])
| [
"[email protected]"
] | |
f97d1a8d1a8a7dc659883a5b5bc249e619f17c03 | f1c071430a352ef82a4e7b902d6081851e5d569a | /neuronunit/capabilities/channel.py | 87c52e19babe3b1c5dbc25efe5553022cee45991 | [] | no_license | rgerkin/neuronunit | b1e5aeadc03c0be3507b0182ae81c89371c5f899 | 85330f1c4e4206b347d5a5e7792d41536ae71a0a | refs/heads/master | 2021-01-17T05:41:06.109463 | 2015-10-23T22:22:30 | 2015-10-23T22:22:30 | 8,037,496 | 2 | 1 | null | 2015-06-25T15:32:01 | 2013-02-05T20:37:39 | Python | UTF-8 | Python | false | false | 1,382 | py | """Channel capabilities"""
import inspect
import sciunit
class NML2_Channel_Runnable(sciunit.Capability):
"""Capability for models that can be run using functions available in pyNeuroML.analsysi.NML2ChannelAnalysis"""
def NML2_channel_run(self,**run_params):
        raise NotImplementedError("%s not implemented" % inspect.stack()[0][3])
class ProducesIVCurve(sciunit.Capability):
"""The capability to produce a current-voltage plot for a set of voltage steps"""
def produce_iv_curve(self, **run_params):
"""Produces steady-state and peak IV curve at voltages and conditions given according to 'run_params'"""
        raise NotImplementedError("%s not implemented" % inspect.stack()[0][3])
def produce_iv_curve_ss(self,**run_params):
"""Produces steady-state IV curve at voltages and conditions given according to 'run_params'"""
        raise NotImplementedError("%s not implemented" % inspect.stack()[0][3])
    def produce_iv_curve_peak(self, **run_params):
        """Produces peak current IV curve at voltages and conditions given according to 'run_params'"""
        raise NotImplementedError("%s not implemented" % inspect.stack()[0][3])
def plot_iv_curve(self,iv_data):
"""Plots IV Curve using results from 'iv_data'"""
        raise NotImplementedError("%s not implemented" % inspect.stack()[0][3])
"[email protected]"
] | |
e7391ef71192ed06e3c4ff224131362673034076 | 2b4badbedab24ed4376ab65818d0e59af6539144 | /messcode/mail_first.py | 5e2d1b63c30dd456564b90b3732846639eeebee1 | [] | no_license | matthewangbin/Python | 878d8180d12d235f8d238574414bb41edad5ceee | c9a94b4203380a06364da1f7466aafc4b141d951 | refs/heads/master | 2021-10-11T01:45:22.890144 | 2019-01-21T02:44:25 | 2019-01-21T02:44:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | # -*- coding: utf-8 -*-
# @Time : 2017/12/25 21:55
# @Author : Matthew
# @Site :
# @File : mail_first.py
# @Software: PyCharm
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
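# Encode "Display Name <addr>" headers per RFC 2047 so non-ASCII names survive SMTP.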
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr(( \
Header(name, 'utf-8').encode(), \
addr.encode('utf-8') if isinstance(addr, unicode) else addr))
from_addr = '[email protected]'
password = 'wb284745'
to_addr = '[email protected]'
smtp_server = 'smtp.163.com'
msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
msg['From'] = _format_addr(u'Python爱好者 <%s>' % from_addr)
msg['To'] = _format_addr(u'管理员 <%s>' % to_addr)
msg['Subject'] = Header(u'来自SMTP的问候……', 'utf-8').encode()
#msg.attach(MIMEText('send with files','plain','utf-8'))
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit() | [
"[email protected]"
] | |
87cc4210ce4c5b742377c17cba6924f894a19b86 | c1ba3127b3526aba8b9bf25fddd172020a8858a8 | /easy/array/max_product_of_3_numbers/max_product_of_3_numbers.py | 92170a0b39e7a4c064842c5fbae87866cdf17d9c | [
"MIT"
] | permissive | deepshig/leetcode-solutions | f5c627215e79323dba3bb6d4005e35e33f31c858 | 1e99e0852b8329bf699eb149e7dfe312f82144bc | refs/heads/master | 2022-11-30T20:50:02.007164 | 2020-08-06T19:21:02 | 2020-08-06T19:21:02 | 279,260,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | class Solution(object):
def maximumProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
first_min, second_min = float("inf"), float("inf")
first_max, second_max, third_max = - \
float("inf"), -float("inf"), -float("inf")
for n in nums:
if n <= first_min:
second_min, first_min = first_min, n
elif n <= second_min:
second_min = n
if n >= first_max:
third_max, second_max, first_max = second_max, first_max, n
elif n >= second_max:
third_max, second_max = second_max, n
elif n >= third_max:
third_max = n
product_1 = first_min*second_min*first_max
product_2 = first_max*second_max*third_max
return max(product_1, product_2)
s = Solution()
print("Solution 1 : ", s.maximumProduct([1, 2, 3]))
print("Solution 2 : ", s.maximumProduct([1, 2, 3, 4]))
print("Solution 3 : ", s.maximumProduct([-4, -3, -2, -1, 60]))
| [
"[email protected]"
] | |
fdf0aba38c57fae8c11e4e5cdb741c8e4a0951be | 743ad4ed116b838da917f105909d9e84e10a4b31 | /day06/ResponseAndRequest/ResponseAndRequest/spiders/myspider.py | 7568ce3d3f4c4f39a6c46469c61ae47cf2fc81a5 | [] | no_license | heiyouyou/Scrapy | 62bb90638a8d6ee1aa62dcf525463c6b0a6a46e6 | afa74f885d30ae3486b1da52dc90d0b7118f4dc1 | refs/heads/master | 2021-09-07T12:44:11.063627 | 2018-02-23T03:05:39 | 2018-02-23T03:05:39 | 113,541,279 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
start_urls = ['http://example.com']
def parse(self,response):
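        # self.settings is attached by Scrapy when the spider is created from a crawler.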
print('Existing settings: %s' % self.settings.attributes.keys()) | [
"[email protected]"
] | |
6f0728d8e2f5aeeb689a2bb3c96ffab2ed3f3d84 | 3d154d9b3fe7487356d155c23d2b3541dacae1c1 | /dao/userhelper.py | dc1d343b7a1cb51c8d4dcdd22c7e3441947cccc7 | [] | no_license | igortereshchenko/python_oracle_orm_service | ef847fff7d0762813edf64f54235f471cdccd62f | d824fa0f01b2fdcc92b053ea942bb382266a0b43 | refs/heads/master | 2020-05-23T12:06:54.729982 | 2019-05-16T19:12:05 | 2019-05-16T19:12:05 | 186,751,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | from dao.db import OracleDb
import cx_Oracle
class UserHelper:
def __init__(self):
self.db = OracleDb()
def getSkillData(self, skill_name=None):
if skill_name:
skill_name="'{0}'".format(skill_name)
else:
skill_name='null'
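        # skill_name is interpolated into the SQL text; bind variables would be safer for untrusted input.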
query = "select * from table(user_skillS.GetSkillData({0}))".format(skill_name)
result = self.db.execute(query)
return result.fetchall()
def getUserId(self, user_email, user_password):
user_id = self.db.cursor.callfunc("USER_AUTH.GET_USER_ID", cx_Oracle.NATIVE_INT, [user_email, user_password])
return user_id
def newUser(self, USER_STUDYBOOK, USER_YEAR, USER_NAME, USER_EMAIL, USER_BIRTHDAY, USER_PASSWORD):
cursor = self.db.cursor
user_id = cursor.var(cx_Oracle.NATIVE_INT)
status = cursor.var(cx_Oracle.STRING)
cursor.callproc("USER_AUTH.NEW_USER", [user_id, status, USER_STUDYBOOK, USER_YEAR, USER_NAME, USER_EMAIL, USER_BIRTHDAY.upper(), USER_PASSWORD])
return user_id.getvalue(), status.getvalue()
def getUsers(self):
return self.db.execute('SELECT * FROM "user"').fetchall()
if __name__ == "__main__":
helper = UserHelper()
print(helper.getSkillData('Java'))
print(helper.getSkillData())
print(helper.getUserId('[email protected]','222'))
print(helper.newUser('KM5555', '10-OCT-17', 'Kate', '[email protected]', '21-OCT-97','555'))
print(helper.getUsers()) | [
"[email protected]"
] | |
cc7e03607b31f4438fef4e654117ffd8353d2dc4 | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/minimum-initial-energy-to-finish-tasks/422867377.py | 4ecf5f68fe76384049941bdc723c184406bd6d20 | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | # title: minimum-initial-energy-to-finish-tasks
# detail: https://leetcode.com/submissions/detail/422867377/
# datetime: Sun Nov 22 14:20:04 2020
# runtime: 1260 ms
# memory: 59.4 MB
class Solution:
def minimumEffort(self, tasks: List[List[int]]) -> int:
'''
        Suppose T is an optimal ordering of the tasks list, the initial energy
        is E, and the leftover energy at the end is L. The problem becomes
        choosing T and L so that E is minimal.
        Working backwards:
        step n:     E[n] = L
        step n - 1: E[n - 1] = max(E[n] + T[n - 1][0], T[n - 1][1])
        step n - 2: E[n - 2] = max(E[n - 1] + T[n - 2][0], T[n - 2][1])
        ...
        step 0:     E[0] = max(E[1] + T[0][0], T[0][1])
        Recurrence: E[n] = max(E[n + 1] + T[n][0], T[n][1])
        With a single task, E[0] equals T[0][1] and E[1] = T[0][1] - T[0][0].
e = max(e + a, m)
return e | [
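# Illustrative check (not part of the original submission): for tasks
# [[1, 3], [2, 4], [10, 11], [10, 12], [8, 9]] the loop returns 32,
# matching the LeetCode sample answer for this problem.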
"[email protected]"
] | |
4085500854bc565a03cb7ed04cbd39c6bb4c3dca | b7255be7fc09f7fd8178b820c6ff3c69d7e4d750 | /flask-api/python_go/pythonjs/runtime/go_builtins.py | 52c0758a59fb24623525b8c7a61e7f2eaaca7eb8 | [] | no_license | divyajyotiuk/go-asn-playground | e65bcd5474674005fb64567ec205b3b5f757e438 | ee7fd0c57e86f84e045bbc888fb3c4f265bdb969 | refs/heads/master | 2021-02-05T02:09:02.338834 | 2020-04-13T10:40:31 | 2020-04-13T10:40:31 | 243,732,017 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | # PythonJS Go builtins
# by Brett Hartshorn - copyright 2014
# License: "New BSD"
import strconv
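# inline() embeds raw Go source that the PythonJS Go backend copies verbatim into the generated module.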
inline("""
type __object__ struct {
__class__ string
}
type object interface{
getclassname() string
}
func (self __object__) getclassname() string {
return self.__class__
}
func ord(x string) int {
r := []rune(x)
return int(r[0])
}
func __test_if_true__(v interface{}) bool {
switch v.(type) {
case nil:
return false
case int:
i,_ := v.(int)
return i != 0
	case float64:
		f,_ := v.(float64)
		return f != 0.0
case bool:
b,_ := v.(bool)
return b
case string:
s,_ := v.(string)
return s != ""
default:
return false
}
}
func str(v interface{}) string {
switch v.(type) {
case nil:
return "None"
case int:
i,_ := v.(int)
return strconv.Itoa(i)
case float64:
return "TODO float"
case bool:
b,_ := v.(bool)
if b { return "True"
} else { return "False" }
case string:
s,_ := v.(string)
return s
default:
return "TODO unknown type"
}
}
func range1( x int ) *[]int {
arr := make([]int, x)
for i := 0; i < x; i++ {
arr[i]=i
}
return &arr
}
func range2( start int, stop int ) *[]int {
	arr := make([]int, 0, stop-start)
	for i := start; i < stop; i++ {
		arr = append(arr, i)
	}
	return &arr
}
func range3( start int, stop int, step int ) *[]int {
	arr := make([]int, 0)
	for i := start; i < stop; i+=step {
		arr = append(arr, i)
	}
	return &arr
}
""")
| [
"[email protected]"
] | |
bd866540d8720bd5ec59b2b3bc0c4f34b4c1c817 | 4cf14ded3e404a9801f7fc1103d7a72019fecd0c | /alembic/unused_versions/b70252e34014_use_sqlitecompat_module.py | f581905adf8096ee4040a7550b77a921cd531c63 | [] | no_license | elthran/RPG-Game | 8315aac6b0b162e9233a901d5af5c018ca4bf9d1 | 6168d7938c72a5a0bb36ca40b96a2a7232021cb5 | refs/heads/master | 2018-09-23T14:51:27.111954 | 2018-06-12T18:28:02 | 2018-06-12T18:28:02 | 64,792,548 | 0 | 0 | null | 2018-06-12T18:32:20 | 2016-08-02T21:05:58 | Python | UTF-8 | Python | false | false | 992 | py | """Use SQLiteCompat module to drop a column.
Also first revision, yay!
IMPORTANT!!
Fancy method to drop a column when using SQLite.
Yes, it it super long and stupidly complex.
All it does is replicate:
op.drop_column('forum', 'title')
Revision ID: b70252e34014
Revises:
Create Date: 2018-01-31 20:48:18.530044
"""
from alembic import op
import sqlalchemy as sa
import sys
import os
# Get the name of the current directory for this file and split it.
old_path = os.path.dirname(os.path.abspath(__file__)).split(os.sep)
new_path = os.sep.join(old_path[:-1])
# -1 refers to how many levels of directory to go up
sys.path.insert(0, new_path)
from sqlite_compat import SQLiteCompat
sys.path.pop(0)
# revision identifiers, used by Alembic.
revision = 'b70252e34014'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.add_column('forum', sa.Column('title', sa.String))
def downgrade():
compat = SQLiteCompat()
compat.drop_column('forum', 'title')
| [
"[email protected]"
] | |
567b57a6d4840a4a37fbb66684b700fe4a6dd28f | 74983098c5de53007bde6052a631845c781b5ba8 | /hartmann6/hartmann6_54/hartmann6.py | bbbeab0123de21bfab6548b67e88e58a1eaed978 | [] | no_license | numairmansur/Experiments | 94ccdd60f4c2cf538fab41556ac72405656c9d77 | 592f39916461c7a9f7d400fa26f849043d1377ed | refs/heads/master | 2021-04-29T12:39:16.845074 | 2017-02-15T07:36:47 | 2017-02-15T07:36:47 | 78,043,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import numpy as np
import sys
import math
import time
import csv
from hpolib.benchmarks.synthetic_functions import Hartmann6
from time import gmtime, strftime
def main(job_id, params):
print '!!! Entered Main !!!'
print 'Anything printed here will end up in the output directory for job #:', str(job_id)
print params
f = Hartmann6()
res = f.objective_function([params['x'], params['y'], params['z'], params['xx'], params['yy'], params['zz']])
print res
with open('/home/mansurm/Experiments/hartmann6/run54.csv','a') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow([res['main'][0]])
return res['main'][0]
| [
"[email protected]"
] | |
1bf849a5f322986e8eb6180f2477adc70b8f1651 | 63b0fed007d152fe5e96640b844081c07ca20a11 | /yukicoder/MMA Contest 016/f.py | 8898f332475726cac9f98f6195e8c1c880d65675 | [] | no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | S = input()
N = len(S)
alp = [[] for _ in range(26)]
ans = 0
for i, v in enumerate(S):
alp[ord(v)-65].append(i)
for i in range(26):
for j in range(1, len(alp[i])):
idx = alp[i][j]
tmp = j*(N-idx-1-(len(alp[i])-j-1))
ans += tmp
print(ans)
| [
"[email protected]"
] | |
eb3684cf5f531df58ed97cd89a662dc3087080a9 | d951b8b2d87121947d7c0bfb7ad659d0ddb247ee | /thermo/electrochem.py | 7502bbbfb717815d2e9d71c321eec47f79b8becc | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | alchemyst/thermo | 5c86cadfe41cbbd7c6fb15825b3dfa5981f94439 | 745ea4733e7ec5d3a4e0cad43f9e8b855ab5968b | refs/heads/master | 2020-12-02T06:35:55.370811 | 2017-07-09T18:46:16 | 2017-07-09T18:46:16 | 96,860,644 | 3 | 0 | null | 2017-07-11T06:50:02 | 2017-07-11T06:50:02 | null | UTF-8 | Python | false | false | 26,515 | py | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
__all__ = ['conductivity', 'Laliberte_density', 'Laliberte_heat_capacity',
'Laliberte_viscosity', 'Laliberte_data', 'Laliberte_viscosity_w',
'Laliberte_viscosity_i', 'Laliberte_density_w',
'Laliberte_density_i', 'Laliberte_heat_capacity_w',
'Laliberte_heat_capacity_i', 'Lange_cond_pure',
'conductivity_methods', 'Magomedovk_thermal_cond',
'thermal_conductivity_Magomedov', 'ionic_strength', 'Kweq_1981',
'Kweq_IAPWS_gas', 'Kweq_IAPWS']
import os
from thermo.utils import exp, log10
from thermo.utils import e, N_A
from thermo.utils import to_num
from scipy.interpolate import interp1d
import pandas as pd
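# Faraday constant, C/mol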
F = e*N_A
folder = os.path.join(os.path.dirname(__file__), 'Electrolytes')
_Laliberte_Density_ParametersDict = {}
_Laliberte_Viscosity_ParametersDict = {}
_Laliberte_Heat_Capacity_ParametersDict = {}
# Do not re-implement with Pandas, as current methodology uses these dicts in each function
with open(os.path.join(folder, 'Laliberte2009.tsv')) as f:
next(f)
for line in f:
values = to_num(line.split('\t'))
_name, CASRN, _formula, _MW, c0, c1, c2, c3, c4, Tmin, Tmax, wMax, pts = values[0:13]
if c0:
_Laliberte_Density_ParametersDict[CASRN] = {"Name":_name, "Formula":_formula,
"MW":_MW, "C0":c0, "C1":c1, "C2":c2, "C3":c3, "C4":c4, "Tmin":Tmin, "Tmax":Tmax, "wMax":wMax}
v1, v2, v3, v4, v5, v6, Tmin, Tmax, wMax, pts = values[13:23]
if v1:
_Laliberte_Viscosity_ParametersDict[CASRN] = {"Name":_name, "Formula":_formula,
"MW":_MW, "V1":v1, "V2":v2, "V3":v3, "V4":v4, "V5":v5, "V6":v6, "Tmin":Tmin, "Tmax":Tmax, "wMax":wMax}
a1, a2, a3, a4, a5, a6, Tmin, Tmax, wMax, pts = values[23:34]
if a1:
_Laliberte_Heat_Capacity_ParametersDict[CASRN] = {"Name":_name, "Formula":_formula,
"MW":_MW, "A1":a1, "A2":a2, "A3":a3, "A4":a4, "A5":a5, "A6":a6, "Tmin":Tmin, "Tmax":Tmax, "wMax":wMax}
Laliberte_data = pd.read_csv(os.path.join(folder, 'Laliberte2009.tsv'),
sep='\t', index_col=0)
### Laliberty Viscosity Functions
def Laliberte_viscosity_w(T):
r'''Calculate the viscosity of a water using the form proposed by [1]_.
No parameters are needed, just a temperature. Units are Kelvin and Pa*s.
    t is temperature in degrees Celsius.
.. math::
\mu_w = \frac{t + 246}{(0.05594t+5.2842)t + 137.37}
Parameters
----------
T : float
Temperature of fluid [K]
Returns
-------
mu_w : float
Water viscosity, Pa*s
Notes
-----
    Original source for pure water viscosity is not cited.
No temperature range is given for this equation.
Examples
--------
>>> Laliberte_viscosity_w(298)
0.0008932264487033279
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
t = T-273.15
mu_w = (t + 246)/((0.05594*t+5.2842)*t + 137.37)
return mu_w/1000.
def Laliberte_viscosity_i(T, w_w, v1, v2, v3, v4, v5, v6):
r'''Calculate the viscosity of a solute using the form proposed by [1]_
Parameters are needed, and a temperature. Units are Kelvin and Pa*s.
.. math::
\mu_i = \frac{\exp\left( \frac{v_1(1-w_w)^{v_2}+v_3}{v_4 t +1}\right)}
{v_5(1-w_w)^{v_6}+1}
Parameters
----------
T : float
Temperature of fluid [K]
w_w : float
Weight fraction of water in the solution
v1-v6 : floats
Function fit parameters
Returns
-------
mu_i : float
Solute partial viscosity, Pa*s
Notes
-----
Temperature range check is outside of this function.
Check is performed using NaCl at 5 degC from the first value in [1]_'s spreadsheet.
Examples
--------
>>> d = _Laliberte_Viscosity_ParametersDict['7647-14-5']
>>> Laliberte_viscosity_i(273.15+5, 1-0.005810, d["V1"], d["V2"], d["V3"], d["V4"], d["V5"], d["V6"] )
0.004254025533308794
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
t = T-273.15
mu_i = exp((v1*(1-w_w)**v2 + v3)/(v4*t+1))/(v5*(1-w_w)**v6 + 1)
return mu_i/1000.
def Laliberte_viscosity(T, ws, CASRNs):
r'''Calculate the viscosity of an aqueous mixture using the form proposed by [1]_.
Parameters are loaded by the function as needed. Units are Kelvin and Pa*s.
.. math::
\mu_m = \mu_w^{w_w} \Pi\mu_i^{w_i}
Parameters
----------
T : float
Temperature of fluid [K]
ws : array
Weight fractions of fluid components other than water
CASRNs : array
CAS numbers of the fluid components other than water
Returns
-------
mu_i : float
Solute partial viscosity, Pa*s
Notes
-----
Temperature range check is not used here.
Check is performed using NaCl at 5 degC from the first value in [1]_'s spreadsheet.
Examples
--------
>>> Laliberte_viscosity(273.15+5, [0.005810], ['7647-14-5'])
0.0015285828581961414
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
mu_w = Laliberte_viscosity_w(T)*1000.
w_w = 1 - sum(ws)
mu = mu_w**(w_w)
for i in range(len(CASRNs)):
d = _Laliberte_Viscosity_ParametersDict[CASRNs[i]]
mu_i = Laliberte_viscosity_i(T, w_w, d["V1"], d["V2"], d["V3"], d["V4"], d["V5"], d["V6"])*1000.
mu = mu_i**(ws[i])*mu
return mu/1000.
### Laliberty Density Functions
def Laliberte_density_w(T):
r'''Calculate the density of water using the form proposed by [1]_.
    No parameters are needed, just a temperature. Units are Kelvin and kg/m^3.
.. math::
\rho_w = \frac{\left\{\left([(-2.8054253\times 10^{-10}\cdot t +
1.0556302\times 10^{-7})t - 4.6170461\times 10^{-5}]t
-0.0079870401\right)t + 16.945176 \right\}t + 999.83952}
{1 + 0.01687985\cdot t}
Parameters
----------
T : float
Temperature of fluid [K]
Returns
-------
rho_w : float
Water density, [kg/m^3]
Notes
-----
Original source not cited
No temperature range is used.
Examples
--------
>>> Laliberte_density_w(298.15)
997.0448954179155
>>> Laliberte_density_w(273.15 + 50)
988.0362916114763
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
t = T-273.15
rho_w = (((((-2.8054253E-10*t + 1.0556302E-7)*t - 4.6170461E-5)*t - 0.0079870401)*t + 16.945176)*t + 999.83952) \
/ (1 + 0.01687985*t)
return rho_w
def Laliberte_density_i(T, w_w, c0, c1, c2, c3, c4):
r'''Calculate the density of a solute using the form proposed by Laliberte [1]_.
Parameters are needed, and a temperature, and water fraction. Units are Kelvin and Pa*s.
.. math::
\rho_{app,i} = \frac{(c_0[1-w_w]+c_1)\exp(10^{-6}[t+c_4]^2)}
{(1-w_w) + c_2 + c_3 t}
Parameters
----------
T : float
Temperature of fluid [K]
w_w : float
Weight fraction of water in the solution
c0-c4 : floats
Function fit parameters
Returns
-------
rho_i : float
Solute partial density, [kg/m^3]
Notes
-----
Temperature range check is TODO
Examples
--------
>>> d = _Laliberte_Density_ParametersDict['7647-14-5']
>>> Laliberte_density_i(273.15+0, 1-0.0037838838, d["C0"], d["C1"], d["C2"], d["C3"], d["C4"])
3761.8917585699983
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
t = T - 273.15
return ((c0*(1 - w_w)+c1)*exp(1E-6*(t + c4)**2))/((1 - w_w) + c2 + c3*t)
def Laliberte_density(T, ws, CASRNs):
r'''Calculate the density of an aqueous electrolyte mixture using the form proposed by [1]_.
Parameters are loaded by the function as needed. Units are Kelvin and Pa*s.
.. math::
\rho_m = \left(\frac{w_w}{\rho_w} + \sum_i \frac{w_i}{\rho_{app_i}}\right)^{-1}
Parameters
----------
T : float
Temperature of fluid [K]
ws : array
Weight fractions of fluid components other than water
CASRNs : array
CAS numbers of the fluid components other than water
Returns
-------
rho_i : float
Solution density, [kg/m^3]
Notes
-----
Temperature range check is not used here.
Examples
--------
>>> Laliberte_density(273.15, [0.0037838838], ['7647-14-5'])
1002.6250120185854
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
rho_w = Laliberte_density_w(T)
w_w = 1 - sum(ws)
rho = w_w/rho_w
for i in range(len(CASRNs)):
d = _Laliberte_Density_ParametersDict[CASRNs[i]]
rho_i = Laliberte_density_i(T, w_w, d["C0"], d["C1"], d["C2"], d["C3"], d["C4"])
rho = rho + ws[i]/rho_i
return 1./rho
### Laliberty Heat Capacity Functions
_T_array = [-15, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115, 120, 125, 130, 135, 140]
_Cp_array = [4294.03, 4256.88, 4233.58, 4219.44, 4204.95, 4195.45, 4189.1, 4184.8, 4181.9, 4180.02, 4178.95, 4178.86, 4178.77, 4179.56, 4180.89, 4182.77, 4185.17, 4188.1, 4191.55, 4195.52, 4200.01, 4205.02, 4210.57, 4216.64, 4223.23, 4230.36, 4238.07, 4246.37, 4255.28, 4264.84, 4275.08, 4286.04]
Laliberte_heat_capacity_w_interp = interp1d(_T_array, _Cp_array, kind='cubic')
def Laliberte_heat_capacity_w(T):
r'''Calculate the heat capacity of water using the interpolation proposed by [1]_.
No parameters are needed, just a temperature.
.. math::
Cp_w = Cp_1 + (Cp_2-Cp_1) \left( \frac{t-t_1}{t_2-t_1}\right)
+ \frac{(Cp_3 - 2Cp_2 + Cp_1)}{2}\left( \frac{t-t_1}{t_2-t_1}\right)
\left( \frac{t-t_1}{t_2-t_1}-1\right)
Parameters
----------
T : float
Temperature of fluid [K]
Returns
-------
Cp_w : float
Water heat capacity, [J/kg/K]
Notes
-----
Units are Kelvin and J/kg/K.
Original source not cited
No temperature range is used.
The original equation is not used, but rather a cubic scipy interpolation routine.
Examples
--------
>>> Laliberte_heat_capacity_w(273.15+3.56)
4208.878020261102
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
return float(Laliberte_heat_capacity_w_interp(T - 273.15))
def Laliberte_heat_capacity_i(T, w_w, a1, a2, a3, a4, a5, a6):
r'''Calculate the heat capacity of a solute using the form proposed by [1]_
Parameters are needed, and a temperature, and water fraction.
.. math::
Cp_i = a_1 e^\alpha + a_5(1-w_w)^{a_6}
\alpha = a_2 t + a_3 \exp(0.01t) + a_4(1-w_w)
Parameters
----------
T : float
Temperature of fluid [K]
w_w : float
Weight fraction of water in the solution
a1-a6 : floats
Function fit parameters
Returns
-------
Cp_i : float
Solute partial heat capacity, [J/kg/K]
Notes
-----
Units are Kelvin and J/kg/K.
Temperature range check is TODO
Examples
--------
>>> d = _Laliberte_Heat_Capacity_ParametersDict['7647-14-5']
>>> Laliberte_heat_capacity_i(1.5+273.15, 1-0.00398447, d["A1"], d["A2"], d["A3"], d["A4"], d["A5"], d["A6"])
-2930.7353945880477
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
t = T - 273.15
alpha = a2*t + a3*exp(0.01*t) + a4*(1. - w_w)
Cp_i = a1*exp(alpha) + a5*(1. - w_w)**a6
return Cp_i*1000.
def Laliberte_heat_capacity(T, ws, CASRNs):
r'''Calculate the heat capacity of an aqueous electrolyte mixture using the
form proposed by [1]_.
Parameters are loaded by the function as needed.
.. math::
TODO
Parameters
----------
T : float
Temperature of fluid [K]
ws : array
Weight fractions of fluid components other than water
CASRNs : array
CAS numbers of the fluid components other than water
Returns
-------
Cp : float
Solution heat capacity, [J/kg/K]
Notes
-----
Temperature range check is not implemented.
Units are Kelvin and J/kg/K.
Examples
--------
>>> Laliberte_heat_capacity(273.15+1.5, [0.00398447], ['7647-14-5'])
4186.569908672113
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
Cp_w = Laliberte_heat_capacity_w(T)
w_w = 1 - sum(ws)
Cp = w_w*Cp_w
for i in range(len(CASRNs)):
d = _Laliberte_Heat_Capacity_ParametersDict[CASRNs[i]]
Cp_i = Laliberte_heat_capacity_i(T, w_w, d["A1"], d["A2"], d["A3"], d["A4"], d["A5"], d["A6"])
Cp = Cp + ws[i]*Cp_i
return Cp
#print Laliberte_heat_capacity(298.15, [0.1], ['7664-41-7']) #4186.0988
## Aqueous HCl, trying to find heat capacity of Cl- as H+ is zero.
#zero = Laliberte_heat_capacity(298.15, [0.0000000000000001], ['7647-01-0'])
#small = Laliberte_heat_capacity(298.15, [0.1], ['7647-01-0']) # 1 molal
#print zero, small
#print (zero-small)*36.46094/100
## cRC gives -136.4 J/mol
## I cannot reproduce this at all.
### Electrical Conductivity
Lange_cond_pure = pd.read_csv(os.path.join(folder, 'Lange Pure Species Conductivity.tsv'),
sep='\t', index_col=0)
LANGE_COND = "LANGE_COND"
NONE = 'None'
conductivity_methods = [LANGE_COND]
def conductivity(CASRN=None, AvailableMethods=False, Method=None, full_info=True):
r'''This function handles the retrieval of a chemical's conductivity.
Lookup is based on CASRNs. Will automatically select a data source to use
if no Method is provided; returns None if the data is not available.
Function has data for approximately 100 chemicals.
Parameters
----------
CASRN : string
CASRN [-]
Returns
-------
kappa : float
Electrical conductivity of the fluid, [S/m]
T : float, only returned if full_info == True
Temperature at which conductivity measurement was made
methods : list, only returned if AvailableMethods == True
List of methods which can be used to obtain RI with the given inputs
Other Parameters
----------------
Method : string, optional
A string for the method name to use, as defined by constants in
conductivity_methods
AvailableMethods : bool, optional
If True, function will determine which methods can be used to obtain
conductivity for the desired chemical, and will return methods instead
of conductivity
full_info : bool, optional
If True, function will return the temperature at which the conductivity
reading was made
Notes
-----
Only one source is available in this function. It is:
    * 'LANGE_COND' which is from Lange's Handbook, Table 8.34 'Electrical
      Conductivity of Various Pure Liquids', a compilation of data in [1]_.
Examples
--------
>>> conductivity('7732-18-5')
(4e-06, 291.15)
References
----------
.. [1] Speight, James. Lange's Handbook of Chemistry. 16 edition.
McGraw-Hill Professional, 2005.
'''
def list_methods():
methods = []
if CASRN in Lange_cond_pure.index:
methods.append(LANGE_COND)
methods.append(NONE)
return methods
if AvailableMethods:
return list_methods()
if not Method:
Method = list_methods()[0]
if Method == LANGE_COND:
kappa = float(Lange_cond_pure.at[CASRN, 'Conductivity'])
if full_info:
T = float(Lange_cond_pure.at[CASRN, 'T'])
elif Method == NONE:
kappa, T = None, None
else:
        raise Exception('Failure in function')
if full_info:
return kappa, T
else:
return kappa
Magomedovk_thermal_cond = pd.read_csv(os.path.join(folder, 'Magomedov Thermal Conductivity.tsv'),
sep='\t', index_col=0)
def thermal_conductivity_Magomedov(T, P, ws, CASRNs, k_w=None):
r'''Calculate the thermal conductivity of an aqueous mixture of
electrolytes using the form proposed by Magomedov [1]_.
Parameters are loaded by the function as needed. Function will fail if an
electrolyte is not in the database.
.. math::
\lambda = \lambda_w\left[ 1 - \sum_{i=1}^n A_i (w_i + 2\times10^{-4}
w_i^3)\right] - 2\times10^{-8} PT\sum_{i=1}^n w_i
Parameters
----------
T : float
Temperature of liquid [K]
P : float
Pressure of the liquid [Pa]
ws : array
Weight fractions of liquid components other than water
CASRNs : array
CAS numbers of the liquid components other than water
k_w : float
        Liquid thermal conductivity of pure water at T and P, [W/m/K]
Returns
-------
kl : float
        Liquid thermal conductivity, [W/m/K]
Notes
-----
Range from 273 K to 473 K, P from 0.1 MPa to 100 MPa. C from 0 to 25 mass%.
    Internal units are MPa for pressure and weight percent for composition.
An example is sought for this function. It is not possible to reproduce
the author's values consistently.
Examples
--------
>>> thermal_conductivity_Magomedov(293., 1E6, [.25], ['7758-94-3'], k_w=0.59827)
0.548654049375
References
----------
.. [1] Magomedov, U. B. "The Thermal Conductivity of Binary and
Multicomponent Aqueous Solutions of Inorganic Substances at High
Parameters of State." High Temperature 39, no. 2 (March 1, 2001):
221-26. doi:10.1023/A:1017518731726.
'''
P = P/1E6
ws = [i*100 for i in ws]
if not k_w:
raise Exception('k_w correlation must be provided')
sum1 = 0
for i, CASRN in enumerate(CASRNs):
Ai = float(Magomedovk_thermal_cond.at[CASRN, 'Ai'])
sum1 += Ai*(ws[i] + 2E-4*ws[i]**3)
return k_w*(1 - sum1) - 2E-8*P*T*sum(ws)
def ionic_strength(mis, zis):
r'''Calculate the ionic strength of a solution in one of two ways,
depending on the inputs only. For Pitzer and Bromley models,
`mis` should be molalities of each component. For eNRTL models,
`mis` should be mole fractions of each electrolyte in the solution.
This will sum to be much less than 1.
.. math::
I = \frac{1}{2} \sum M_i z_i^2
I = \frac{1}{2} \sum x_i z_i^2
Parameters
----------
mis : list
Molalities of each ion, or mole fractions of each ion [mol/kg or -]
zis : list
Charges of each ion [-]
Returns
-------
I : float
        Ionic strength, [mol/kg or -]
Examples
--------
>>> ionic_strength([0.1393, 0.1393], [1, -1])
0.1393
References
----------
.. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local
Composition Model for Excess Gibbs Energy of Electrolyte Systems.
Part I: Single Solvent, Single Completely Dissociated Electrolyte
Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96.
doi:10.1002/aic.690280410
.. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
'''
return 0.5*sum([mi*zi*zi for mi, zi in zip(mis, zis)])
def Kweq_1981(T, rho_w):
r'''Calculates equilibrium constant for OH- and H+ in water, according to
[1]_. Second most recent formulation.
.. math::
\log_{10} K_w= A + B/T + C/T^2 + D/T^3 + (E+F/T+G/T^2)\log_{10} \rho_w
Parameters
----------
T : float
Temperature of fluid [K]
rho_w : float
Density of water, [kg/m^3]
Returns
-------
Kweq : float
Ionization constant of water, [-]
Notes
-----
Density is internally converted to units of g/cm^3.
A = -4.098;
B = -3245.2;
C = 2.2362E5;
D = -3.984E7;
E = 13.957;
F = -1262.3;
G = 8.5641E5
Examples
--------
>>> -1*log10(Kweq_1981(600, 700))
11.274522047458206
References
----------
.. [1] Marshall, William L., and E. U. Franck. "Ion Product of Water
Substance, 0-1000 degree C, 1010,000 Bars New International Formulation
and Its Background." Journal of Physical and Chemical Reference Data 10,
no. 2 (April 1, 1981): 295-304. doi:10.1063/1.555643.
'''
rho_w = rho_w/1000.
A = -4.098
B = -3245.2
C = 2.2362E5
D = -3.984E7
E = 13.957
F = -1262.3
G = 8.5641E5
return 10**(A + B/T + C/T**2 + D/T**3 + (E + F/T + G/T**2)*log10(rho_w))
def Kweq_IAPWS_gas(T):
r'''Calculates equilibrium constant for OH- and H+ in water vapor,
according to [1]_.
This is the most recent formulation available.
.. math::
-log_{10} K_w^G = \gamma_0 + \gamma_1 T^{-1} + \gamma_2 T^{-2} + \gamma_3 T^{-3}
Parameters
----------
T : float
Temperature of H2O [K]
Returns
-------
    K_w_G : float
        Ionization constant of water vapor, [-]
Notes
-----
gamma0 = 6.141500E-1;
gamma1 = 4.825133E4;
gamma2 = -6.770793E4;
gamma3 = 1.010210E7
Examples
--------
>>> Kweq_IAPWS_gas(800)
1.4379721554798815e-61
References
----------
.. [1] Bandura, Andrei V., and Serguei N. Lvov. "The Ionization Constant
of Water over Wide Ranges of Temperature and Density." Journal of Physical
and Chemical Reference Data 35, no. 1 (March 1, 2006): 15-30.
doi:10.1063/1.1928231
'''
gamma0 = 6.141500E-1
gamma1 = 4.825133E4
gamma2 = -6.770793E4
gamma3 = 1.010210E7
K_w_G = 10**(-1*(gamma0 + gamma1/T + gamma2/T**2 + gamma3/T**3))
return K_w_G
def Kweq_IAPWS(T, rho_w):
r'''Calculates equilibrium constant for OH- and H+ in water, according to
[1]_.
This is the most recent formulation available.
.. math::
Q = \rho \exp(\alpha_0 + \alpha_1 T^{-1} + \alpha_2 T^{-2} \rho^{2/3})
- \log_{10} K_w = -2n \left[ \log_{10}(1+Q) - \frac{Q}{Q+1} \rho
(\beta_0 + \beta_1 T^{-1} + \beta_2 \rho) \right]
-\log_{10} K_w^G + 2 \log_{10} \frac{18.015268}{1000}
Parameters
----------
T : float
Temperature of water [K]
rho_w : float
Density of water at temperature and pressure [kg/m^3]
Returns
-------
Kweq : float
Ionization constant of water, [-]
Notes
-----
Formulation is in terms of density in g/cm^3; density
is converted internally.
n = 6;
alpha0 = -0.864671;
alpha1 = 8659.19;
alpha2 = -22786.2;
beta0 = 0.642044;
beta1 = -56.8534;
beta2 = -0.375754
Examples
--------
Example from IAPWS check:
>>> -1*log10(Kweq_IAPWS(600, 700))
11.203153057603775
References
----------
.. [1] Bandura, Andrei V., and Serguei N. Lvov. "The Ionization Constant
of Water over Wide Ranges of Temperature and Density." Journal of Physical
and Chemical Reference Data 35, no. 1 (March 1, 2006): 15-30.
doi:10.1063/1.1928231
'''
K_w_G = Kweq_IAPWS_gas(T)
rho_w = rho_w/1000.
n = 6
alpha0 = -0.864671
alpha1 = 8659.19
alpha2 = -22786.2
beta0 = 0.642044
beta1 = -56.8534
beta2 = -0.375754
Q = rho_w*exp(alpha0 + alpha1/T + alpha2/T**2*rho_w**(2/3.))
K_w = 10**(-1*(-2*n*(log10(1+Q)-Q/(Q+1) * rho_w *(beta0 + beta1/T + beta2*rho_w)) -
log10(K_w_G) + 2*log10(18.015268/1000) ))
return K_w
| [
"[email protected]"
] | |
dad7e56953388c605ffb3cee21d10c587cc1f059 | 3002ce1c3a5628386fe747c8f1733b2f482780b9 | /tests/densities/test_posterior_gp_classification_ard.py | ce104a3bdc897f1397b9c19df1fb5cae0676e0e7 | [] | no_license | exord/kernel_hmc | eb74f58d4c4a5a121f7383ba11dabfc1f1c95220 | f74e9eafe5637c4c95af1823d629140ca3b4b909 | refs/heads/master | 2020-03-19T10:28:05.767470 | 2018-06-06T19:01:04 | 2018-06-06T19:01:04 | 136,373,212 | 0 | 0 | null | 2018-06-06T18:54:42 | 2018-06-06T18:54:42 | null | UTF-8 | Python | false | false | 674 | py | # depends on shogun, which might not be available
from nose import SkipTest
import numpy as np
try:
from kernel_hmc.densities.posterior_gp_classification_ard import GlassPosterior
glass_available = True
except ImportError:
glass_available = False
def test_glass_posterior_setup_execute():
if not glass_available:
raise SkipTest("Shogun not available")
GlassPosterior().set_up()
def test_glass_posterior_log_pdf_execute():
if not glass_available:
raise SkipTest("Shogun not available")
D = 9
theta = np.random.randn(D)
target = GlassPosterior()
target.set_up()
target.log_pdf(theta)
| [
"[email protected]"
] | |
552b996cc76cd62cf2dd0b73ebf1972aec0af841 | 988176bcdae841e08106b0fe5cf07aabbc210c83 | /minimum sum of the maximum elements obtained by considering all consecutive pairs of adjacent elements after rearranging the array suitably.py | c871699196e8fd15530261a7480607de06e2522f | [] | no_license | gopiprasad008/GUVI_CODEKATA_PYTHON_CODE | ce1a63c7eea2a099c01748162c1deb47172dcd0a | 78f374e344df25aab181408d8f41b3ebe03b34ef | refs/heads/master | 2023-03-16T00:27:31.539524 | 2020-05-16T11:46:08 | 2020-05-16T11:46:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from itertools import permutations
n = int(input())
l = [int(x) for x in input().split()]
n = len(l)
m = list(permutations(l,len(l)))
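# Brute force: evaluate the adjacent-pair-maximum sum for every ordering, O(n! * n).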
a = float('inf')
for i in range(len(m)):
    b = 0
    for j in range(len(m[i])-1):
        b += max(m[i][j], m[i][j+1])
    if b < a:
        a = b
print(a)
| [
"[email protected]"
] | |
087a01950facd0b6e12f0c2c3ec8eeef6135feb5 | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/response/KoubeiTradeOrderEnterpriseQueryResponse.py | d46129cc828bcc061ec6d475c1359df544675c21 | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 4,551 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class KoubeiTradeOrderEnterpriseQueryResponse(AlipayResponse):
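    # Generated response model for koubei.trade.order.enterprise.query: each response field is
    # exposed as a property and populated by parse_response_content() below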
def __init__(self):
super(KoubeiTradeOrderEnterpriseQueryResponse, self).__init__()
self._buyer_user_id = None
self._ext_info = None
self._merchant_subsidy_amount = None
self._order_no = None
self._order_product = None
self._out_order_no = None
self._partner_id = None
self._real_amount = None
self._seller_id = None
self._shop_id = None
self._status = None
self._subject = None
self._subsidy_amount = None
self._total_amount = None
@property
def buyer_user_id(self):
return self._buyer_user_id
@buyer_user_id.setter
def buyer_user_id(self, value):
self._buyer_user_id = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def merchant_subsidy_amount(self):
return self._merchant_subsidy_amount
@merchant_subsidy_amount.setter
def merchant_subsidy_amount(self, value):
self._merchant_subsidy_amount = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def order_product(self):
return self._order_product
@order_product.setter
def order_product(self, value):
self._order_product = value
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
@property
def real_amount(self):
return self._real_amount
@real_amount.setter
def real_amount(self, value):
self._real_amount = value
@property
def seller_id(self):
return self._seller_id
@seller_id.setter
def seller_id(self, value):
self._seller_id = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def subject(self):
return self._subject
@subject.setter
def subject(self, value):
self._subject = value
@property
def subsidy_amount(self):
return self._subsidy_amount
@subsidy_amount.setter
def subsidy_amount(self, value):
self._subsidy_amount = value
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, value):
self._total_amount = value
def parse_response_content(self, response_content):
response = super(KoubeiTradeOrderEnterpriseQueryResponse, self).parse_response_content(response_content)
if 'buyer_user_id' in response:
self.buyer_user_id = response['buyer_user_id']
if 'ext_info' in response:
self.ext_info = response['ext_info']
if 'merchant_subsidy_amount' in response:
self.merchant_subsidy_amount = response['merchant_subsidy_amount']
if 'order_no' in response:
self.order_no = response['order_no']
if 'order_product' in response:
self.order_product = response['order_product']
if 'out_order_no' in response:
self.out_order_no = response['out_order_no']
if 'partner_id' in response:
self.partner_id = response['partner_id']
if 'real_amount' in response:
self.real_amount = response['real_amount']
if 'seller_id' in response:
self.seller_id = response['seller_id']
if 'shop_id' in response:
self.shop_id = response['shop_id']
if 'status' in response:
self.status = response['status']
if 'subject' in response:
self.subject = response['subject']
if 'subsidy_amount' in response:
self.subsidy_amount = response['subsidy_amount']
if 'total_amount' in response:
self.total_amount = response['total_amount']
| [
"[email protected]"
] | |
0fa7fd97a12aa5fda184f563ef020c56e3b9b228 | 3209e66f38b23400916296daa022be3f19ab4f98 | /venv/bin/easy_install-3.8 | b0ac408f2f89f56349c3aee83a8c3b68105f9ed6 | [] | no_license | surajbeston/multi_tenant | 5fd5ae88e6585592fa8f2a34f19591aad1febf6d | 205c7360cb58e32ec53387ab222a7c28d82b426b | refs/heads/master | 2023-06-02T16:50:42.931252 | 2021-06-28T18:22:06 | 2021-06-28T18:22:06 | 380,757,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | 8 | #!/home/suraj/projects/work/multi_tenant/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
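    # Strip a trailing '-script.pyw' or '.exe' so the program name matches what setuptools expects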
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
55a28b610c9796ee5d9f9724c9a0bfe4eb09061c | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L4M/4M-4J_wat_20Abox/set_3.py | 4e1279d74cc1ddc180be8d4b21bde67aa12bb920 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import os
dir = '/mnt/scratch/songlin3/run/bace/L4M/wat_20Abox/ti_one-step/4M_4J/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_3.in'
temp_pbs = filesdir + 'temp_3.pbs'
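# 12 lambda windows for the thermodynamic integration run; the values appear to be
# 12-point Gaussian quadrature nodes mapped onto [0, 1]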
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_3.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_3.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
b84e8238b02903cb56003a86f49f4d732686de63 | 2ec26d004a653c0576594e48ac13dd71f539b30a | /gist_dump/sine_destruction.py | 48a1342ece579d42dc108367e41efe8f91e1bd2b | [] | no_license | kastnerkyle/research_megarepo | 6aca5b2c3b2413e0def1093b23f2826e3e7e5e97 | ab182667650fd59b99f75d4b599d7ace77a3f30b | refs/heads/master | 2021-01-17T20:31:52.250050 | 2016-12-27T01:28:54 | 2016-12-27T01:28:54 | 68,341,074 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # Author: Kyle Kastner
# License: BSD 3-clause
import matplotlib.pyplot as plt
import numpy as np
fs = 100 # sample rate of 100 samples / sec, with max f 50
f = 5 # 5 Hz frequency
samples = 25 # .25 seconds of samples @ 100 samples / sec
x = np.arange(samples)
y1 = np.sin(2 * np.pi * f * x / fs + .5 * np.pi)
y2 = np.sin(2 * np.pi * f * x / fs + -.5 * np.pi)
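# The +pi/2 and -pi/2 phase offsets make y1 = cos and y2 = -cos, so y1 + y2 cancels
# to zero everywhere -- destructive interference, as the file name suggests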
plt.plot(y1)
plt.plot(y2)
plt.plot(y1 + y2)
plt.show() | [
"[email protected]"
] | |
cf9c444fa59eb0b67c60813865bf38503df80ad9 | e728a7b5447c4ca03ba799bec61459528f30fd88 | /esvi/model.py | 6effe6136b1881fe62efa747a7f8180ffab43f4b | [] | no_license | reritom/Esvi | deb75c0ca4b17494ed80adc3b735be008e0b3352 | 1e3242c61ec86af7b7479cd71f12a8677fcbde1f | refs/heads/master | 2020-03-20T02:08:01.695643 | 2018-11-04T18:35:52 | 2018-11-04T18:35:52 | 137,100,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,492 | py | from esvi import fields
from esvi.query import Query
from esvi.model_instance import ModelInstance
from esvi.query_executor import QueryExecutor
from esvi.model_set import ModelSet
from typing import Optional
class Model():
"""
This class is to be inherited by child models. The static methods for interacting with the DB executor also call __new__
    so that the child attributes can be retrieved without the child class needing to be instantiated first.
"""
child_retrieved = False
def __new__(cls, internal=False):
print("In model new")
        # To allow the classmethods to access child properties without an explicit instantiation, this method gets called by
        # each classmethod. The following flag checks whether it has already been run or not
        if cls.child_retrieved and internal:
return
# Initialise the model name
cls.model_name = getattr(cls, 'model_name') if hasattr(cls, 'model_name') else cls.__name__ + "Model"
# Initialise the fields
cls.model_fields = dict()
# Primary Key flag
pk_flag = 0
# Here we grab any fields from the child class attributes
for value in dir(cls):
class_attribute = getattr(cls, value)
if hasattr(class_attribute, '__class__') and class_attribute.__class__.__base__ == fields.BaseField:
cls.model_fields[value] = class_attribute
if class_attribute.is_primary():
cls.primary_key = value
pk_flag += 1
        if pk_flag != 1:
            raise Exception("Model {0} must define exactly one primary key field".format(cls.model_name))
cls.child_retrieved = True
cls.executor = QueryExecutor()
return cls
@classmethod
def get_model_name(cls) -> str:
"""
Return the model name from the child
"""
Model.__new__(cls, internal=True)
return cls.model_name
@classmethod
def get_primary_key(cls) -> str:
"""
        Return the name of the primary key field from the child
"""
Model.__new__(cls, internal=True)
return cls.primary_key
@classmethod
def get_fields(cls) -> dict:
"""
Return a dictionary with the field names and their field classes
"""
Model.__new__(cls, internal=True)
return cls.model_fields
@classmethod
def _initialise_in_db(cls) -> None:
"""
This will add the model definition to the DB
"""
Model.__new__(cls, internal=True)
# Here we create the query and pass it to the executor
query = Query(model_name=cls.model_name, model_fields=cls.model_fields, action="initialise")
response = cls.executor.execute(query)
@classmethod
def _get_defition_from_db(cls) -> None:
"""
Retrieves the model fields from the DB in a list of field names in the correct order
"""
Model.__new__(cls, internal=True)
# Here we create the query and pass it to the executor
query = Query(model_name=cls.model_name, model_fields=None, action="definition")
response = cls.executor.execute(query)
@classmethod
def create(cls, **kwargs) -> ModelInstance:
"""
Create a model item in the DB
"""
Model.__new__(cls, internal=True)
# Initialise the content of this model
content = dict()
# Here we validate that the model is being initialised with enough information
for field_name, definition in cls.model_fields.items():
if field_name in kwargs:
# Check if it is in the kwargs
definition.validate(kwargs[field_name])
print("Field name {} is class {}".format(field_name, kwargs[field_name].__class__.__name__))
# If it is a foreign key (the value is a ModelInstance)
if isinstance(kwargs[field_name], ModelInstance):
print("Is model instance")
# We convert the value to the primary key and primary value
primary_key = kwargs[field_name].get_primary_key()
print("Primary key is {}".format(primary_key))
content[primary_key] = kwargs[field_name].get(primary_key)
continue
content[field_name] = kwargs[field_name]
elif definition.has_default():
# Check if it has a default value
content[field_name] = definition.get_default()
else:
raise Exception("{} missing as parameter and has no default".format(field_name))
# Here we create the query and pass it to the executor
query = Query(model_name=cls.model_name, model_fields=cls.model_fields, action="create", content=content)
response = cls.executor.execute(query)
return ModelInstance(model_name=cls.model_name, model_fields=cls.model_fields, model_content=response) if response else None
@classmethod
def retrieve(cls, primary_key_value) -> Optional[ModelInstance]:
"""
Retrieve a single model by primary key
"""
Model.__new__(cls, internal=True)
query = Query(model_name=cls.model_name, model_fields=cls.model_fields, action="retrieve", content=primary_key_value)
response = cls.executor.execute(query)
return ModelInstance(model_name=cls.model_name, model_fields=cls.model_fields, model_content=response) if response else None
@classmethod
def retrieve_all(cls) -> ModelSet:
"""
Retrieve all of the model items from the db and returns them in a model set
"""
Model.__new__(cls, internal=True)
query = Query(model_name=cls.model_name, model_fields=cls.model_fields, action="all")
response = cls.executor.execute(query)
print("Retrieve all response is {}".format(response))
return ModelSet([ModelInstance(model_name=cls.model_name, model_fields=cls.model_fields, model_content=i) for i in response])
@classmethod
def filter(cls, **kwargs) -> ModelSet:
Model.__new__(cls, internal=True)
filters = ['_less_or_equal',
'_greater_or_equal',
'equal',
'_less_than',
'_greater_than',
'between_inc',
'between',
'not']
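        # NOTE: parsing of these comparison suffixes and the filter query itself are not implemented yet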
pass
| [
"[email protected]"
] | |
18cb063d140ca52402076ae16b83ac2bbdaa92cb | 3a6a211ea0d32405497fbd6486c490bb147e25f9 | /third_party/pyasn1_modules/pyasn1_modules/rfc3779.py | aaf856e822b6d15286373124b860200a0e6768d6 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | catapult-project/catapult | e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0 | 53102de187a48ac2cfc241fef54dcbc29c453a8e | refs/heads/main | 2021-05-25T07:37:22.832505 | 2021-05-24T08:01:49 | 2021-05-25T06:07:38 | 33,947,548 | 2,032 | 742 | BSD-3-Clause | 2022-08-26T16:01:18 | 2015-04-14T17:49:05 | HTML | UTF-8 | Python | false | false | 2,750 | py | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# X.509 Extensions for IP Addresses and AS Identifiers
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc3779.txt
#
from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
# IP Address Delegation Extension
id_pe_ipAddrBlocks = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.7')
class IPAddress(univ.BitString):
pass
class IPAddressRange(univ.Sequence):
pass
IPAddressRange.componentType = namedtype.NamedTypes(
namedtype.NamedType('min', IPAddress()),
namedtype.NamedType('max', IPAddress())
)
class IPAddressOrRange(univ.Choice):
pass
IPAddressOrRange.componentType = namedtype.NamedTypes(
namedtype.NamedType('addressPrefix', IPAddress()),
namedtype.NamedType('addressRange', IPAddressRange())
)
class IPAddressChoice(univ.Choice):
pass
IPAddressChoice.componentType = namedtype.NamedTypes(
namedtype.NamedType('inherit', univ.Null()),
namedtype.NamedType('addressesOrRanges', univ.SequenceOf(componentType=IPAddressOrRange()))
)
class IPAddressFamily(univ.Sequence):
pass
IPAddressFamily.componentType = namedtype.NamedTypes(
namedtype.NamedType('addressFamily', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(2, 3))),
namedtype.NamedType('ipAddressChoice', IPAddressChoice())
)
class IPAddrBlocks(univ.SequenceOf):
pass
IPAddrBlocks.componentType = IPAddressFamily()
# Autonomous System Identifier Delegation Extension
id_pe_autonomousSysIds = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.8')
class ASId(univ.Integer):
pass
class ASRange(univ.Sequence):
pass
ASRange.componentType = namedtype.NamedTypes(
namedtype.NamedType('min', ASId()),
namedtype.NamedType('max', ASId())
)
class ASIdOrRange(univ.Choice):
pass
ASIdOrRange.componentType = namedtype.NamedTypes(
namedtype.NamedType('id', ASId()),
namedtype.NamedType('range', ASRange())
)
class ASIdentifierChoice(univ.Choice):
pass
ASIdentifierChoice.componentType = namedtype.NamedTypes(
namedtype.NamedType('inherit', univ.Null()),
namedtype.NamedType('asIdsOrRanges', univ.SequenceOf(componentType=ASIdOrRange()))
)
class ASIdentifiers(univ.Sequence):
pass
ASIdentifiers.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('asnum', ASIdentifierChoice().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('rdi', ASIdentifierChoice().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
| [
"[email protected]"
] | |
08ef2eccd84d514f3ce0b256c869f809ca0f08a9 | 7caa438706a423dd9779a81f8345fcf1ec11e921 | /NXT-Python/pyglet-1.2.4/examples/soundspace/soundspace.py | 82d76b2cabf3419b1931d08ad73170c7c952cb6b | [
"BSD-3-Clause"
] | permissive | tamarinvs19/python-learning | 5dd2582f5dc504e19a53e9176677adc5170778b0 | 1e514ad7ca8f3d2e2f785b11b0be4d57696dc1e9 | refs/heads/master | 2021-07-15T13:23:24.238594 | 2021-07-08T07:07:21 | 2021-07-08T07:07:21 | 120,604,826 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,493 | py | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
import math
import os
import pyglet
from pyglet.gl import *
import reader
pyglet.resource.path.append('res')
pyglet.resource.reindex()
# Check for AVbin
try:
from pyglet.media import avbin
except ImportError:
raise ImportError('AVbin is required for this example, see '
'http://code.google.com/p/avbin')
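# Draw a filled disc (or a pie wedge from start to end angle) as a triangle fan centred at (x, y)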
def disc(r, x, y, slices=20, start=0, end=2*math.pi):
d = (end - start) / (slices - 1)
s = start
points = [(x, y)] + [(x + r * math.cos(a*d+s), y + r * math.sin(a*d+s)) \
for a in range(slices)]
points = ((GLfloat * 2) * len(points))(*points)
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
glEnableClientState(GL_VERTEX_ARRAY)
glVertexPointer(2, GL_FLOAT, 0, points)
glDrawArrays(GL_TRIANGLE_FAN, 0, len(points))
glPopClientAttrib()
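# Draw an unfilled circle as a line loop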
def circle(r, x, y, slices=20):
d = 2 * math.pi / slices
points = [(x + r * math.cos(a*d), y + r * math.sin(a*d)) \
for a in range(slices)]
points = ((GLfloat * 2) * len(points))(*points)
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
glEnableClientState(GL_VERTEX_ARRAY)
glVertexPointer(2, GL_FLOAT, 0, points)
glDrawArrays(GL_LINE_LOOP, 0, len(points))
glPopClientAttrib()
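# Angle of an (x, y, z) orientation vector projected onto the horizontal x/z plane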
def orientation_angle(orientation):
return math.atan2(orientation[2], orientation[0])
class Handle(object):
tip = ''
def __init__(self, player):
self.player = player
def hit_test(self, x, y, z):
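        # Return the hit point's offset from the handle centre when it falls inside the handle's radius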
dx, dy, dz = [a - b for a, b in zip(self.pos(), (x, y, z))]
if dx * dx + dy * dy + dz * dz < self.radius * self.radius:
return -dx, -dy, -dz
def draw(self):
pass
def begin_drag(self, window, offset):
self.win = window
self.offset = offset
return self
def on_mouse_press(self, x, y, button, modifiers):
self.win.remove_handlers(self)
def on_mouse_release(self, x, y, button, modifiers):
self.win.remove_handlers(self)
class LabelHandle(Handle):
def __init__(self, player):
super(LabelHandle, self).__init__(player)
self.text = pyglet.text.Label('', font_size=10, color=(0, 0, 0, 255),
anchor_y='top', anchor_x='center')
def hit_test(self, x, y, z):
return None
def draw(self):
if hasattr(self.player, 'label'):
x, _, y = self.player.position
# ech. fudge scale back to 1
mat = (GLfloat * 16)()
glGetFloatv(GL_MODELVIEW_MATRIX, mat)
glPushMatrix()
glTranslatef(x, y, 0)
glScalef(1/mat[0], 1/mat[5], 1/mat[10])
glTranslatef(0, -5, 0)
self.text.text = self.player.label
self.text.draw()
glPopMatrix()
class PositionHandle(Handle):
tip = 'position'
radius = .3
def draw(self):
glPushMatrix()
glTranslatef(self.player.position[0], self.player.position[2], 0)
glColor3f(1, 0, 0)
glBegin(GL_TRIANGLES)
glVertex2f(0, self.radius)
glVertex2f(-self.radius * math.sqrt(3) / 2, -.5 * self.radius)
glVertex2f(self.radius * math.sqrt(3) / 2, -.5 * self.radius)
glEnd()
glPopMatrix()
def pos(self):
return self.player.position
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
pos = self.win.mouse_transform(x, y)
self.player.position = \
(pos[0] - self.offset[0],
pos[1] - self.offset[1],
pos[2] - self.offset[2])
class OrientationHandle(Handle):
radius = .1
length = 1.5
def pos(self):
x, _, z = self.player.position
dir = self.get_orientation()
sz = math.sqrt(dir[0] ** 2 + dir[1] ** 2 + dir[2] ** 2) or 1
if sz != 0:
x += dir[0] / sz * self.length
z += dir[2] / sz * self.length
return x, 0, z
def draw(self):
glPushAttrib(GL_ENABLE_BIT | GL_CURRENT_BIT)
px, _, py = self.player.position
x, _, y = self.pos()
# Dashed line
glColor3f(.3, .3, .3)
glEnable(GL_LINE_STIPPLE)
glLineStipple(1, 0x7777)
glBegin(GL_LINES)
glVertex2f(px, py)
glVertex2f(x, y)
glEnd()
# This handle (orientation)
glColor3f(1, 1, 0)
disc(self.radius, x, y)
glPopAttrib()
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
px, py, pz = self.player.position
hx, hy, hz = self.win.mouse_transform(x, y)
self.set_orientation(
(hx - self.offset[0] - px,
hy - self.offset[1] - py,
hz - self.offset[2] - pz))
class ConeOrientationHandle(OrientationHandle):
tip = 'cone_orientation'
def get_orientation(self):
return self.player.cone_orientation
def set_orientation(self, orientation):
self.player.cone_orientation = orientation
class ForwardOrientationHandle(OrientationHandle):
tip = 'forward_orientation'
def get_orientation(self):
return self.player.forward_orientation
def set_orientation(self, orientation):
self.player.forward_orientation = orientation
class ConeAngleHandle(Handle):
radius = .1
def pos(self):
px, py, pz = self.player.position
angle = orientation_angle(self.player.cone_orientation)
angle += self.get_angle() * math.pi / 180. / 2
x = math.cos(angle) * self.length
z = math.sin(angle) * self.length
return px + x, py, pz + z
def draw(self):
glPushAttrib(GL_ENABLE_BIT | GL_CURRENT_BIT)
# Fill
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glColor4f(*self.fill_color)
px, _, py = self.player.position
angle = orientation_angle(self.player.cone_orientation)
a = self.get_angle() * math.pi / 180.
disc(self.length, px, py,
start=angle - a/2,
end=angle + a/2)
# Handle
x, _, y = self.pos()
glColor4f(*self.color)
disc(self.radius, x, y)
glPopAttrib()
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
px, py, pz = self.player.position
hx, hy, hz = self.win.mouse_transform(x, y)
angle = orientation_angle(self.player.cone_orientation)
hangle = orientation_angle((hx - px, hy - py, hz - pz))
if hangle < angle:
hangle += math.pi * 2
res = min(max((hangle - angle) * 2, 0), math.pi * 2)
self.set_angle(res * 180. / math.pi)
class ConeInnerAngleHandle(ConeAngleHandle):
tip = 'cone_inner_angle'
length = 1.
color = (.2, .8, .2, 1)
fill_color = (0, 1, 0, .1)
def get_angle(self):
return self.player.cone_inner_angle
def set_angle(self, angle):
self.player.cone_inner_angle = angle
class ConeOuterAngleHandle(ConeAngleHandle):
tip = 'cone_outer_angle'
length = 1.2
color = (.2, .2, .8, 1)
fill_color = (0, 0, 1, .1)
def get_angle(self):
return self.player.cone_outer_angle
def set_angle(self, angle):
self.player.cone_outer_angle = angle
class MoreHandle(Handle):
tip = 'More...'
radius = .2
open = False
open_width = 1.5
open_height = 1.5
def pos(self):
x, y, z = self.player.position
return x + 1, y, z + 1
def draw(self):
x, _, z = self.pos()
if self.open:
x -= .2
z += .2
glPushAttrib(GL_ENABLE_BIT)
glEnable(GL_BLEND)
glColor4f(1, 1, 1, .8)
glBegin(GL_QUADS)
glVertex2f(x, z)
glVertex2f(x + self.open_width, z)
glVertex2f(x + self.open_width, z - self.open_height)
glVertex2f(x, z - self.open_height)
glEnd()
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glColor4f(0, 0, 0, 1)
glBegin(GL_QUADS)
glVertex2f(x, z)
glVertex2f(x + self.open_width, z)
glVertex2f(x + self.open_width, z - self.open_height)
glVertex2f(x, z - self.open_height)
glEnd()
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glPopAttrib()
else:
glColor3f(1, 1, 1)
disc(self.radius, x, z)
glColor3f(0, 0, 0)
circle(self.radius, x, z)
r = self.radius - 0.1
glBegin(GL_LINES)
glVertex2f(x - r, z)
glVertex2f(x + r, z)
glVertex2f(x, z - r)
glVertex2f(x, z + r)
glEnd()
def begin_drag(self, window, offset):
self.open = True
self.win = window
self.win.set_more_player_handles(self.player)
return self
def on_mouse_press(self, x, y, button, modifiers):
x, y, z = self.win.mouse_transform(x, y)
for handle in self.win.more_handles:
if handle.hit_test(x, y, z):
return
self.win.set_more_player_handles(None)
self.win.remove_handlers(self)
self.open = False
def on_mouse_release(self, x, y, button, modifiers):
pass
class SliderHandle(Handle):
length = 1.
width = .05
radius = .1
def __init__(self, player, x, z):
super(SliderHandle, self).__init__(player)
self.x = x
self.z = z
def pos(self):
x, y, z = self.player.position
x += self.x + self.get_value() * self.length
z += self.z
return x, y, z
def draw(self):
x = self.x + self.player.position[0]
z = self.z + self.player.position[2]
# Groove
glColor3f(.5, .5, .5)
glBegin(GL_QUADS)
glVertex2f(x, z - self.width/2)
glVertex2f(x + self.length, z - self.width/2)
glVertex2f(x + self.length, z + self.width/2)
glVertex2f(x, z + self.width/2)
glEnd()
# Thumb
x, _, z = self.pos()
glColor3f(.2, .2, .2)
disc(self.radius, x, z)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
px, py, pz = self.player.position
hx, hy, hz = self.win.mouse_transform(x, y)
value = float(hx - px - self.x) / self.length
value = min(max(value, 0), 1)
self.set_value(value)
class VolumeHandle(SliderHandle):
tip = 'volume'
def __init__(self, player):
super(VolumeHandle, self).__init__(player, 1, .9)
def get_value(self):
return self.player.volume
def set_value(self, value):
self.player.volume = value
class ListenerVolumeHandle(SliderHandle):
tip = 'volume'
def __init__(self, player):
super(ListenerVolumeHandle, self).__init__(player, -.5, -1)
def get_value(self):
return self.player.volume
def set_value(self, value):
self.player.volume = value
class MinDistanceHandle(SliderHandle):
tip = 'min_distance'
def __init__(self, player):
super(MinDistanceHandle, self).__init__(player, 1, .6)
def get_value(self):
return self.player.min_distance / 5.
def set_value(self, value):
self.player.min_distance = value * 5.
class MaxDistanceHandle(SliderHandle):
tip = 'max_distance'
def __init__(self, player):
super(MaxDistanceHandle, self).__init__(player, 1, .3)
def get_value(self):
return min(self.player.max_distance / 5., 1.0)
def set_value(self, value):
self.player.max_distance = value * 5.
class ConeOuterGainHandle(SliderHandle):
tip = 'cone_outer_gain'
def __init__(self, player):
super(ConeOuterGainHandle, self).__init__(player, 1, 0)
def get_value(self):
return self.player.cone_outer_gain
def set_value(self, value):
self.player.cone_outer_gain = value
class SoundSpaceWindow(pyglet.window.Window):
def __init__(self, **kwargs):
kwargs.update(dict(
caption='Sound Space',
resizable=True,
))
super(SoundSpaceWindow, self).__init__(**kwargs)
self.players = []
self.handles = []
self.more_handles = []
listener = pyglet.media.get_audio_driver().get_listener()
self.handles.append(PositionHandle(listener))
self.handles.append(ForwardOrientationHandle(listener))
self.handles.append(ListenerVolumeHandle(listener))
self.handles.append(LabelHandle(listener))
self.tip = pyglet.text.Label('', font_size=10, color=(0, 0, 0, 255),
anchor_y='top', anchor_x='center')
self.tip_player = None
# pixels per unit
self.zoom = 40
self.tx = self.width/2
self.ty = self.height/2
def add_player(self, player):
self.players.append(player)
self.handles.append(PositionHandle(player))
self.handles.append(ConeOrientationHandle(player))
self.handles.append(ConeInnerAngleHandle(player))
self.handles.append(ConeOuterAngleHandle(player))
self.handles.append(LabelHandle(player))
self.handles.append(MoreHandle(player))
def set_more_player_handles(self, player):
if player:
self.more_handles = [
VolumeHandle(player),
MinDistanceHandle(player),
MaxDistanceHandle(player),
ConeOuterGainHandle(player),
]
else:
self.more_handles = []
def draw_background(self):
glLoadIdentity()
glPushAttrib(GL_CURRENT_BIT)
glColor3f(1, 1, 1)
glBegin(GL_LINES)
for i in range(0, self.width, self.zoom):
glVertex2f(i, 0)
glVertex2f(i, self.height)
for i in range(0, self.height, self.zoom):
glVertex2f(0, i)
glVertex2f(self.width, i)
glEnd()
glPopAttrib()
def camera_transform(self):
glLoadIdentity()
glTranslatef(self.tx, self.ty, 0)
glScalef(self.zoom, self.zoom, 1)
def mouse_transform(self, x, y):
return (float(x - self.tx) / self.zoom,
0,
float(y - self.ty) / self.zoom)
def player_transform(self, player):
return (player.position[0] * self.zoom + self.tx,
player.position[2] * self.zoom + self.ty)
def hit_test(self, mouse_x, mouse_y):
x, y, z = self.mouse_transform(mouse_x, mouse_y)
for handle in self.more_handles[::-1] + self.handles[::-1]:
offset = handle.hit_test(x, y, z)
if offset:
return handle, offset
return None, None
def on_draw(self):
glClearColor(.8, .8, .8, 1)
self.clear()
self.draw_background()
glPushMatrix()
self.camera_transform()
for handle in self.handles + self.more_handles:
handle.draw()
glPopMatrix()
if self.tip_player:
player_pos = self.player_transform(self.tip_player)
self.tip.x = player_pos[0]
self.tip.y = player_pos[1] - 15
self.tip.draw()
def on_mouse_scroll(self, x, y, dx, dy):
self.zoom += dy * 10
self.zoom = min(max(self.zoom, 10), 100)
def on_mouse_press(self, x, y, button, modifiers):
handle, offset = self.hit_test(x, y)
if handle:
self.push_handlers(handle.begin_drag(self, offset))
else:
self.push_handlers(PanView(self))
def on_mouse_motion(self, x, y, dx, dy):
handle, offset = self.hit_test(x, y)
if handle:
self.tip.text = handle.tip
pos = self.player_transform(handle.player)
self.tip_player = handle.player
else:
self.tip.text = ''
class PanView(object):
def __init__(self, window):
self.win = window
def on_mouse_release(self, x, y, button, modifiers):
self.win.remove_handlers(self)
def on_mouse_press(self, x, y, button, modifiers):
self.win.remove_handlers(self)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
self.win.tx += dx
self.win.ty += dy
if __name__ == '__main__':
# We swap Y and Z, moving to left-handed system
listener = pyglet.media.get_audio_driver().get_listener()
listener.up_orientation = (0, -1, 0)
# Start facing up (er, forwards)
listener.forward_orientation = (0, 0, 1)
listener.label = 'Listener'
w = SoundSpaceWindow()
r = reader.SpaceReader(w)
r.read(pyglet.resource.file('space.txt'))
player_group = pyglet.media.PlayerGroup(w.players)
player_group.play()
pyglet.app.run()
| [
"[email protected]"
] | |
4eb2f293067b5f57a370ad4125140d766cdc0ad6 | 4374b25bd4d7ea4a8d890b08b68ae52ed93c5eaf | /neural_sp/bin/model_name.py | 2121447c1841b0f00c16ed8c596d9bf4f95d6c89 | [
"Apache-2.0"
] | permissive | smilelite/neural_sp | 61833dc20f6ddd36b21e55663f539929c69a9399 | 86fee124982f7483656aa6b8d5db3715fda12460 | refs/heads/master | 2023-04-06T23:31:19.258575 | 2021-04-06T16:02:11 | 2021-04-06T16:02:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,097 | py | # Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Set model name."""
import os
from neural_sp.bin.train_utils import load_config
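# Each _define_*_name helper below imports the encoder/decoder/LM module matching the configured
# architecture and delegates to its define_name(), so the experiment directory name encodes the
# hyperparameters of that architecture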
def _define_encoder_name(dir_name, args):
if args.enc_type == 'tds':
from neural_sp.models.seq2seq.encoders.tds import TDSEncoder as module
elif args.enc_type == 'gated_conv':
from neural_sp.models.seq2seq.encoders.gated_conv import GatedConvEncoder as module
elif 'transformer' in args.enc_type:
from neural_sp.models.seq2seq.encoders.transformer import TransformerEncoder as module
elif 'conformer' in args.enc_type:
from neural_sp.models.seq2seq.encoders.conformer import ConformerEncoder as module
else:
from neural_sp.models.seq2seq.encoders.rnn import RNNEncoder as module
if hasattr(module, 'define_name'):
dir_name = module.define_name(dir_name, args)
else:
raise NotImplementedError(module)
return dir_name
def _define_decoder_name(dir_name, args):
if args.dec_type in ['transformer', 'transformer_xl']:
from neural_sp.models.seq2seq.decoders.transformer import TransformerDecoder as module
elif args.dec_type in ['transformer_transducer', 'transformer_transducer_xl']:
from neural_sp.models.seq2seq.decoders.transformer_transducer import TransformerTransducer as module
elif args.dec_type in ['lstm_transducer', 'gru_transducer']:
from neural_sp.models.seq2seq.decoders.rnn_transducer import RNNTransducer as module
elif args.dec_type == 'asg':
from neural_sp.models.seq2seq.decoders.asg import ASGDecoder as module
else:
from neural_sp.models.seq2seq.decoders.las import RNNDecoder as module
if hasattr(module, 'define_name'):
dir_name = module.define_name(dir_name, args)
else:
raise NotImplementedError(module)
return dir_name
def _define_lm_name(dir_name, args):
if 'gated_conv' in args.lm_type:
from neural_sp.models.lm.gated_convlm import GatedConvLM as module
elif args.lm_type == 'transformer':
from neural_sp.models.lm.transformerlm import TransformerLM as module
elif args.lm_type == 'transformer_xl':
from neural_sp.models.lm.transformer_xl import TransformerXL as module
else:
from neural_sp.models.lm.rnnlm import RNNLM as module
if hasattr(module, 'define_name'):
dir_name = module.define_name(dir_name, args)
else:
raise NotImplementedError(module)
return dir_name
def set_asr_model_name(args):
# encoder
dir_name = args.enc_type.replace('conv_', '')
dir_name = _define_encoder_name(dir_name, args)
if args.n_stacks > 1:
dir_name += '_stack' + str(args.n_stacks)
else:
dir_name += '_' + args.subsample_type + str(args.subsample_factor)
if args.sequence_summary_network:
dir_name += '_ssn'
# decoder
if args.ctc_weight < 1:
dir_name = _define_decoder_name(dir_name, args)
# optimization
dir_name += '_' + args.optimizer
if args.optimizer == 'noam':
dir_name += '_lr' + str(args.lr_factor)
else:
dir_name += '_lr' + str(args.lr)
dir_name += '_bs' + str(args.batch_size)
if args.train_dtype in ["O0", "O1", "O2", "O3"]:
dir_name += '_' + args.train_dtype
# if args.shuffle_bucket:
# dir_name += '_bucket'
# if 'transformer' in args.enc_type or 'transformer' in args.dec_type:
# dir_name += '_' + args.transformer_param_init
# regularization
if args.lsm_prob > 0:
dir_name += '_ls' + str(args.lsm_prob)
if args.warmup_n_steps > 0:
dir_name += '_warmup' + str(args.warmup_n_steps)
if args.accum_grad_n_steps > 1:
dir_name += '_accum' + str(args.accum_grad_n_steps)
# LM integration
if args.lm_fusion:
dir_name += '_' + args.lm_fusion
# MTL
if args.mtl_per_batch:
if args.ctc_weight > 0:
dir_name += '_' + args.unit + 'ctc'
if args.bwd_weight > 0:
dir_name += '_' + args.unit + 'bwd'
for sub in ['sub1', 'sub2']:
if args.get('train_set_' + sub) is not None:
dir_name += '_' + args.get('unit_' + sub) + str(args.get('vocab_' + sub))
if args.get('ctc_weight_' + sub, 0) > 0:
dir_name += 'ctc'
if args.get(sub + '_weight', 0) - args.get('ctc_weight_' + sub, 0) > 0:
dir_name += 'fwd'
else:
if args.ctc_weight > 0:
dir_name += '_ctc' + str(args.ctc_weight)
if args.bwd_weight > 0:
dir_name += '_bwd' + str(args.bwd_weight)
for sub in ['sub1', 'sub2']:
if args.get(sub + '_weight', 0) > 0:
dir_name += '_' + args.get('unit_' + sub) + str(args.get('vocab_' + sub))
if args.get('ctc_weight_' + sub, 0) > 0:
dir_name += 'ctc%.1f' % args.get('ctc_weight_' + sub)
if args.get(sub + '_weight', 0) - args.get('ctc_weight_' + sub, 0) > 0:
dir_name += 'fwd%.2f' % (args.total_weight - args.get(sub + '_weight',
0) - args.get('ctc_weight_' + sub, 0))
if args.task_specific_layer:
dir_name += '_tsl'
# SpecAugment
if args.n_freq_masks > 0:
dir_name += '_' + str(args.freq_width) + 'FM' + str(args.n_freq_masks)
if args.n_time_masks > 0:
if args.adaptive_number_ratio > 0:
dir_name += '_pnum' + str(args.adaptive_number_ratio)
else:
dir_name += '_' + str(args.time_width) + 'TM' + str(args.n_time_masks)
if args.adaptive_size_ratio > 0:
dir_name += '_psize' + str(args.adaptive_size_ratio)
if args.input_noise_std > 0:
dir_name += '_Inoise'
if args.weight_noise_std > 0:
dir_name += '_Wnoise'
# contextualization
if args.discourse_aware:
dir_name += '_discourse'
if args.mem_len > 0:
dir_name += '_mem' + str(args.mem_len)
if args.bptt > 0:
dir_name += '_bptt' + str(args.bptt)
# Pre-training
if args.asr_init and os.path.isfile(args.asr_init):
conf_init = load_config(os.path.join(os.path.dirname(args.asr_init), 'conf.yml'))
dir_name += '_' + conf_init['unit'] + 'pt'
if args.freeze_encoder:
dir_name += '_encfreeze'
if args.lm_init:
dir_name += '_lminit'
# knowledge distillation
if args.teacher:
dir_name += '_KD' + str(args.soft_label_weight)
if args.teacher_lm:
dir_name += '_lmKD' + str(args.soft_label_weight)
# MBR training
if args.mbr_training:
dir_name += '_MBR' + str(args.recog_beam_width) + 'best'
dir_name += '_ce' + str(args.mbr_ce_weight) + '_smooth' + str(args.recog_softmax_smoothing)
if args.n_gpus > 1:
dir_name += '_' + str(args.n_gpus) + 'GPU'
return dir_name
def set_lm_name(args):
dir_name = ''
dir_name = _define_lm_name(dir_name, args)
# optimization
dir_name += '_' + args.optimizer
if args.optimizer == 'noam':
dir_name += '_lr' + str(args.lr_factor)
else:
dir_name += '_lr' + str(args.lr)
dir_name += '_bs' + str(args.batch_size)
if args.train_dtype in ["O0", "O1", "O2", "O3"]:
dir_name += '_' + args.train_dtype
dir_name += '_bptt' + str(args.bptt)
# regularization
dir_name += '_dropI' + str(args.dropout_in) + 'H' + str(args.dropout_hidden)
if args.get('dropout_layer', 0) > 0:
dir_name += 'Layer' + str(args.dropout_layer)
if args.lsm_prob > 0:
dir_name += '_ls' + str(args.lsm_prob)
if args.warmup_n_steps > 0:
dir_name += '_warmup' + str(args.warmup_n_steps)
if args.accum_grad_n_steps > 1:
dir_name += '_accum' + str(args.accum_grad_n_steps)
if args.backward:
dir_name += '_bwd'
if args.shuffle:
dir_name += '_shuffle'
if args.serialize:
dir_name += '_serialize'
return dir_name
| [
"[email protected]"
] | |
4a94d4e0f476fcbb73a0a653a656166d0438ab10 | 2be678ddc49b9ce9c2f7bd198d12b6be94374ddd | /precise/scripts/graph.py | 14cf83e4ec816dd3eabc2a3e18c0d72b66f281b2 | [
"Apache-2.0"
] | permissive | andreselizondo-adestech/mycroft-precise | 2a7f08c7d74d39a1455ea1c15ded47fdedc89096 | 0e0ac5b8b14ff6a6ecffd300c40049131990e8c9 | refs/heads/dev | 2021-05-21T01:21:30.066505 | 2020-08-19T19:37:05 | 2020-08-19T19:37:05 | 252,484,092 | 3 | 1 | Apache-2.0 | 2020-08-19T19:32:00 | 2020-04-02T14:49:40 | Python | UTF-8 | Python | false | false | 5,814 | py | #!/usr/bin/env python3
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import numpy as np
from functools import partial
from os.path import basename, splitext
from prettyparse import Usage
from typing import Callable, Tuple
from precise.network_runner import Listener
from precise.params import inject_params, pr
from precise.scripts.base_script import BaseScript
from precise.stats import Stats
from precise.threshold_decoder import ThresholdDecoder
from precise.train_data import TrainData
def get_thresholds(points=100, power=3) -> list:
"""Run a function with a series of thresholds between 0 and 1"""
return [(i / (points + 1)) ** power for i in range(1, points + 1)]
class CachedDataLoader:
"""
Class for reloading train data every time the params change
Args:
loader: Function that loads the train data (something that calls TrainData.load)
"""
def __init__(self, loader: Callable):
self.prev_cache = None
self.data = None
self.loader = loader
def load_for(self, model: str) -> Tuple[list, list]:
"""Injects the model parameters, reloading if they changed, and returning the data"""
inject_params(model)
if self.prev_cache != pr.vectorization_md5_hash():
self.prev_cache = pr.vectorization_md5_hash()
self.data = self.loader()
return self.data
def load_plt():
try:
import matplotlib.pyplot as plt
return plt
except ImportError:
print('Please install matplotlib first')
raise SystemExit(2)
def calc_stats(model_files, loader, use_train, filenames):
model_data = {}
for model in model_files:
train, test = loader.load_for(model)
inputs, targets = train if use_train else test
print('Running network...')
predictions = Listener.find_runner(model)(model).predict(inputs)
print(inputs.shape, targets.shape)
print('Generating statistics...')
stats = Stats(predictions, targets, filenames)
print('\n' + stats.counts_str() + '\n\n' + stats.summary_str() + '\n')
model_name = basename(splitext(model)[0])
model_data[model_name] = stats
return model_data
class GraphScript(BaseScript):
usage = Usage('''
Show ROC curves for a series of models
...
:-t --use-train
Evaluate training data instead of test data
:-nf --no-filenames
Don't print out the names of files that failed
:-r --resolution int 100
Number of points to generate
:-p --power float 3.0
Power of point distribution
:-l --labels
Print labels attached to each point
:-o --output-file str -
File to write data instead of displaying it
:-i --input-file str -
File to read data from and visualize
...
''')
usage.add_argument('models', nargs='*', help='Either Keras (.net) or TensorFlow (.pb) models to test')
usage |= TrainData.usage
def __init__(self, args):
super().__init__(args)
if not args.models and not args.input_file and args.folder:
args.input_file = args.folder
if bool(args.models) == bool(args.input_file):
raise ValueError('Please specify either a list of models or an input file')
if not args.output_file:
load_plt() # Error early if matplotlib not installed
def run(self):
args = self.args
if args.models:
data = TrainData.from_both(args.tags_file, args.tags_folder, args.folder)
print('Data:', data)
filenames = sum(data.train_files if args.use_train else data.test_files, [])
loader = CachedDataLoader(partial(
data.load, args.use_train, not args.use_train, shuffle=False
))
model_data = calc_stats(args.models, loader, args.use_train, filenames)
else:
model_data = {
name: Stats.from_np_dict(data) for name, data in np.load(args.input_file)['data'].item().items()
}
for name, stats in model_data.items():
print('=== {} ===\n{}\n\n{}\n'.format(name, stats.counts_str(), stats.summary_str()))
if args.output_file:
np.savez(args.output_file, data={name: stats.to_np_dict() for name, stats in model_data.items()})
else:
plt = load_plt()
decoder = ThresholdDecoder(pr.threshold_config, pr.threshold_center)
thresholds = [decoder.encode(i) for i in np.linspace(0.0, 1.0, args.resolution)[1:-1]]
for model_name, stats in model_data.items():
x = [stats.false_positives(i) for i in thresholds]
y = [stats.false_negatives(i) for i in thresholds]
plt.plot(x, y, marker='x', linestyle='-', label=model_name)
if args.labels:
for x, y, threshold in zip(x, y, thresholds):
plt.annotate('{:.4f}'.format(threshold), (x, y))
plt.legend()
plt.xlabel('False Positives')
plt.ylabel('False Negatives')
plt.show()
main = GraphScript.run_main
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f6835052381793f2c861225e2220abd09398454e | 61dd3524e904ee055a761815239da55db26f03eb | /PYTHON-SALA DE AULA/Exercicios condicionais/exe-37.py | 42797fee163339669239668fce6cbf3207f3a018 | [
"Apache-2.0"
] | permissive | JaumVitor/HOMEWORK-PYTHON | 149e4cb6d10421d1e980dd5b75a92f87355582f8 | 7be3299f36af3a924fc5c6f4a63aeed0fd8fb38e | refs/heads/master | 2022-09-15T11:00:55.019562 | 2022-08-26T18:46:06 | 2022-08-26T18:46:06 | 256,103,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | print ('- salaries up to R$ 280.00 (inclusive) : 20% raise')
print ('- salaries between R$ 280.00 and R$ 700.00 : 15% raise')
print ('- salaries between R$ 700.00 and R$ 1500.00 : 10% raise')
print ('- salaries of R$ 1500.00 and above : 5% raise')
print ('='*43)
sal = float ( input ('What is the salary? '))
if (sal <= 280 ):
    nsal1 = ( sal * 1.20 )
    print ('='*43)
    print ('-Raise of 20 %')
    print ('-Old salary : R${:.2f}'.format(sal))
    print ('-New salary is R${:.2f}'.format(nsal1))
elif (sal > 280 ) and ( sal <= 700 ):
    nsal2 = ( sal * 1.15 )
    print ('='*43)
    print ('-Raise of 15 %')
    print ('-Old salary : R${:.2f}'.format(sal))
    print ('-New salary is R${:.2f}'.format(nsal2))
elif (sal > 700 ) and ( sal <= 1500 ):
    nsal3 = ( sal * 1.10 )
    print ('='*43)
    print ('-Raise of 10 %')
    print ('-Old salary : R${:.2f}'.format(sal))
    print ('-New salary is R${:.2f}'.format(nsal3))
elif (sal > 1500 ):
    nsal4 = ( sal * 1.05 )
    print ('='*43)
    print ('-Raise of 5%')
    print ('-Old salary : R${:.2f}'.format(sal))
    print ('-New salary is R${:.2f}'.format(nsal4))
| [
"[email protected]"
] | |
dde80391d1a289f17f39b3d1db3696e9b50a41ec | 530797702626216b6aebc8fa5b55fc0cb494ad3e | /cryptid/cryptid_maps_generator.py | 684018146052a3fe0e77e6788d3135746871f012 | [] | no_license | trung-hn/fun-stuffs | fbe3951bad6a12a5c703892268f0422640aa2232 | afadd2788e8eeff256e47a2c6a23ee6089bd2595 | refs/heads/master | 2023-08-31T20:55:28.662905 | 2023-08-29T20:38:54 | 2023-08-29T20:38:54 | 203,461,969 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,782 | py | #%%
import base64
import json
from selenium import webdriver
from pathlib import Path
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
import time
DRIVER_PATH = r"D:\Workspace\sandbox\chromedriver.exe"
driver = webdriver.Chrome(executable_path=DRIVER_PATH)
driver.get("https://ospreypublishing.com/playcryptid/")
#%%
def download_map_to_path(driver, path):
# Save Image
canvas = driver.find_element(by=By.XPATH, value="//*[@id='mapCanvas']")
# get the canvas as a PNG base64 string
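    # toDataURL() returns 'data:image/png;base64,<data>'; substring(21) strips that prefix
    # (the stray comma survives, but b64decode ignores non-alphabet characters by default)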
canvas_base64 = driver.execute_script(
"return arguments[0].toDataURL('image/png').substring(21);", canvas
)
# decode
canvas_png = base64.b64decode(canvas_base64)
Path(path.rsplit("/", 1)[0]).mkdir(parents=True, exist_ok=True)
with open(path, "wb") as f:
f.write(canvas_png)
def save_clues_to_path(path, clues):
Path(path.rsplit("/", 1)[0]).mkdir(parents=True, exist_ok=True)
with open(path, "w") as f:
json.dump(clues, f)
def save_text_to_path(path, clue):
Path(path.rsplit("/", 1)[0]).mkdir(parents=True, exist_ok=True)
with open(path, "w") as f:
f.write(clue)
def save_clues_for_each_player(driver, folder, player_no):
clues = {}
for player in range(1, player_no + 1):
driver.find_element(by=By.XPATH, value="//*[@id='clueButton']").click()
time.sleep(1)
clue = driver.find_element(by=By.XPATH, value="//*[@id='clueText']")
clues[f"Player {player}:"] = clue.text
save_text_to_path(
folder + f"player {player} clue.txt",
clue.text,
)
driver.find_element(by=By.XPATH, value="//*[@id='clueButton']").click()
time.sleep(0.6)
save_clues_to_path(folder + "clues.json", clues)
return clues
# %%
visited = set()
drop_down = Select(driver.find_element(by=By.XPATH, value="//*[@id='ngfPlayers']"))
for order in range(1, 101):
for player_no in (2, 3, 4, 5):
drop_down.select_by_value(str(player_no))
time.sleep(0.5)
# Start Game
driver.find_element(by=By.XPATH, value="//*[@id='ngfStart']").click()
time.sleep(0.1)
try:
driver.find_element(
by=By.XPATH, value='//button[normalize-space()="OK"]'
).click()
except:
pass
time.sleep(0.5)
folder = f"data/Advance {player_no} players/Game {order}/"
download_map_to_path(driver, folder + "map.png")
# Save clues for each player
clues = save_clues_for_each_player(driver, folder, player_no)
jsonified_clues = json.dumps(clues)
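        # The serialized clue set doubles as a board fingerprint: skip the hint/solution steps
        # for boards already seen (their map and clue files were re-saved above)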
if jsonified_clues in visited:
print(f"{order} already visited")
continue
visited.add(jsonified_clues)
# Get hint
time.sleep(1)
driver.find_element(
by=By.XPATH, value='//button[normalize-space()="Reveal Hint"]'
).click()
time.sleep(0.1)
driver.find_element(value="hint_confirm_yes").click()
time.sleep(0.4)
hint = driver.find_element(by=By.XPATH, value="//*[@id='hintText']")
save_text_to_path(folder + "hint.txt", hint.text)
# Get solution
driver.find_element(value="targetButton").click()
time.sleep(0.1)
driver.find_element(value="target_confirm_yes").click()
time.sleep(1)
download_map_to_path(driver, folder + "solution/solution.png")
# Quit
driver.find_element(by=By.XPATH, value="//*[@id='quitButton']").click()
time.sleep(0.4)
try:
driver.find_element(
by=By.XPATH, value="//*[@id='quit_confirm_yes']"
).click()
except:
pass
time.sleep(0.5)
# %%
| [
"[email protected]"
] | |
0f43d2c8b893f082d38427beb7a50cfa5047b97d | f60eb7d15ce3ca06e2db1dc0af8b3b87bed08c37 | /home/migrations/0026_auto_20170613_1726.py | c701f4e2b1b402be2ac5c422cd43ebada3d8feb6 | [] | no_license | wlminimal/epc | 96136f0c5f2b4ddc04fbc7e7b76d6a41c631ea26 | 2127a4e273a69a3ca0d5711fd1452c1bc5ab7590 | refs/heads/master | 2022-12-12T11:33:57.711869 | 2019-04-12T16:33:58 | 2019-04-12T16:33:58 | 92,700,181 | 0 | 0 | null | 2022-12-07T23:58:05 | 2017-05-29T02:20:33 | Python | UTF-8 | Python | false | false | 496 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-13 17:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0025_auto_20170613_1721'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='service_button_text',
field=models.CharField(default='See more Service info', max_length=50),
),
]
| [
"[email protected]"
] | |
54a0fca131604d36c4f3e8a1caf9b2e7d1d89f9f | a945bbf73016b1689a0dc9811171fac51fad0bba | /research/limited_gradient_smoothing/utils.py | 5638b9cceadc87fb45822807b1342e110ba9cef1 | [
"MIT"
] | permissive | jamietknight/AutoEq | a40cc47e5c19625191903c404cb785ac1ca3883b | 8ef7ffefcfe8786d318f59b2004ec68af09ed61d | refs/heads/master | 2023-01-14T21:09:16.405027 | 2020-11-29T12:49:00 | 2020-11-29T12:49:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,055 | py | # -*- coding: utf-8 -*-
import sys
from pathlib import Path
ROOT_PATH = Path().resolve().parent.parent
if str(ROOT_PATH) not in sys.path:
    sys.path.insert(0, str(ROOT_PATH))
import numpy as np
import scipy.signal
from frequency_response import FrequencyResponse
def limited_slope_plots(fr, limit):
fr.equalization = -fr.error
limited, smoothed, limited_forward, clipped_forward, limited_backward, clipped_backward, peak_inds, dip_inds, \
backward_start, protection_mask = limited_slope(fr.frequency, fr.equalization, limit)
x = fr.frequency.copy()
y = smoothed
# Plot graphs
fig, ax = fr.plot_graph(
show=False, raw=False, error=False, target=False, equalization_plot_kwargs={
'color': 'C2', 'linewidth': 1, 'label': 'Raw equalization', 'linestyle': 'dashed'
})
fig.set_size_inches(20, 9)
ax.plot(x, y, label='Smoothed equalization', color='C2')
ax.plot(x, limited, label='Limited', color='C1')
ax.fill_between(x, clipped_forward * -5, clipped_forward * 10, label='Limited left to right', color='blue',
alpha=0.1)
ax.fill_between(x, clipped_backward * -10, clipped_backward * 5, label='Limited right to left', color='red',
alpha=0.1)
ax.fill_between(x, protection_mask * -12, protection_mask * 12, label='Limitation-safe zone', color='limegreen',
alpha=0.2)
ax.scatter(x[peak_inds], y[peak_inds], color='red')
ax.scatter(x[backward_start], y[backward_start], 200, marker='<', label='Backward start', color='black')
ax.scatter(x[dip_inds], y[dip_inds], color='limegreen')
ax.legend()
return fig, ax
def limited_slope(x, y, limit):
"""Bi-directional slope limitation for a frequency response curve
Args:
x: frequencies
y: amplitudes
        limit: Maximum slope in dB per octave
    Returns:
        Tuple of: combined limited curve, smoothed curve, forward-limited curve, forward clip mask,
        backward-limited curve, backward clip mask, peak indices, dip indices, backward start index
        and limitation-free mask
    """
fr = FrequencyResponse(name='fr', frequency=x, raw=y)
# Smoothen data, heavily on treble to avoid problems in +10 kHz region
fr.smoothen_fractional_octave(window_size=1 / 12, treble_window_size=2, treble_f_lower=9000, treble_f_upper=11500)
# Copy data
x = fr.frequency.copy()
y = fr.smoothed.copy()
# Find peaks and notches
# TODO: these affect which regions are rejected
peak_inds, peak_props = scipy.signal.find_peaks(y, prominence=1)
dip_inds, dip_props = scipy.signal.find_peaks(-y, prominence=1)
limit_free_mask = protection_mask(y, dip_inds)
# Find backward start index
backward_start = find_backward_start(y, peak_inds, dip_inds) # TODO: backward start
# Find forward and backward limitations
# limited_forward is y but with slopes limited when traversing left to right
# clipped_forward is boolean mask for limited samples when traversing left to right
# limited_backward is found using forward algorithm but with flipped data
limited_forward, clipped_forward, regions_forward = limited_forward_slope(
x, y, limit, start_index=0, peak_inds=peak_inds, limit_free_mask=limit_free_mask)
limited_backward, clipped_backward, regions_backward = limited_backward_slope(
x, y, limit, start_index=backward_start, peak_inds=peak_inds, limit_free_mask=limit_free_mask)
# TODO: Find notches which are lower in level than adjacent notches
# TODO: Set detected notches as slope clipping free zones up to levels of adjacent notches
# Forward and backward limited curves are combined with min function
# Combination function is smoothed to get rid of hard kinks
limiter = FrequencyResponse(
name='limiter', frequency=x.copy(), raw=np.min(np.vstack([limited_forward, limited_backward]), axis=0))
limiter.smoothen_fractional_octave(window_size=1 / 5, treble_window_size=1 / 5)
#limiter.smoothed = limiter.raw.copy()
return limiter.smoothed.copy(), fr.smoothed.copy(), limited_forward, clipped_forward, limited_backward, clipped_backward, \
peak_inds, dip_inds, backward_start, limit_free_mask
def protection_mask(y, dip_inds):
"""Finds zones around dips which are lower than their adjacent dips. Zones extend to the lower level of the adjacent
dips.
Args:
        y: amplitudes
dip_inds: Indices of dips
Returns:
Boolean mask for limitation-free indices
"""
mask = np.zeros(len(y)).astype(bool)
if len(dip_inds) < 3:
return mask
    # Find dips which are lower in level than their adjacent dips
dip_levels = y[dip_inds]
    # First row contains levels of next dips
    # Second row contains levels of current dips
    # Third row contains levels of previous dips
# First and last dips are ignored because they don't have both adjacent dips
stack = np.vstack([dip_levels[2:], dip_levels[1:-1], dip_levels[:-2]])
# Boolean mask for dips which are lower than their adjacent dips
null_mask = np.concatenate([[False], np.argmin(stack, axis=0) == 1, [False]])
# Indices of dips which are lower than their adjacent dips
null_inds = np.argwhere(null_mask)[:, 0]
if len(null_inds) < 1:
return mask
# First column is the level of the previous dip
# Second column is the level of the next dip
adjacent_dip_levels = np.vstack([dip_levels[null_inds - 1], dip_levels[null_inds + 1]])
adjacent_dip_levels = np.transpose(adjacent_dip_levels)
# Find indexes on both sides where the curve goes above the adjacent dips minimum level
for i in range(len(null_inds)):
dip_ind = dip_inds[null_inds[i]]
target_left = adjacent_dip_levels[i, 0]
target_right = adjacent_dip_levels[i, 1]
# TODO: Should left and right side targets be separate?
#target = np.min([target_left, target_right])
# TODO: Should target be where gradient reduces below certain threshold?
left_ind = np.argwhere(y[:dip_ind] >= target_left)[-1, 0] + 1
right_ind = np.argwhere(y[dip_ind:] >= target_right)
right_ind = right_ind[0, 0] + dip_ind - 1
mask[left_ind:right_ind + 1] = np.ones(right_ind - left_ind + 1).astype(bool)
return mask
def limited_backward_slope(x, y, limit, start_index=0, peak_inds=None, limit_free_mask=None):
"""Limits forwards slope of a frequency response curve while traversing backwards
Args:
x: frequencies
y: amplitudes
limit: maximum slope in dB / oct
start_index: Index where to start traversing, no limitations apply before this
peak_inds: Peak indexes. Regions will require to touch one of these if given.
limit_free_mask: Boolean mask for indices where limitation must not be applied
Returns:
limited: Limited curve
mask: Boolean mask for clipped indexes
regions: Clipped regions, one per row, 1st column is the start index, 2nd column is the end index (exclusive)
"""
start_index = len(x) - start_index - 1
if peak_inds is not None:
peak_inds = len(x) - peak_inds - 1
if limit_free_mask is not None:
limit_free_mask = np.flip(limit_free_mask)
limited_backward, clipped_backward, regions_backward = limited_forward_slope(
x, np.flip(y), limit, start_index=start_index, peak_inds=peak_inds, limit_free_mask=limit_free_mask)
limited_backward = np.flip(limited_backward)
clipped_backward = np.flip(clipped_backward)
regions_backward = len(x) - regions_backward - 1
return limited_backward, clipped_backward, regions_backward
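# Illustrative sketch (not part of the original module): a 10 dB cliff at the
# top of the band is smeared backwards so the curve falls no faster than
# 4 dB/oct when traversed from high to low frequency. Note that start_index is
# given in the original (unflipped) orientation, and that x itself is not
# flipped above, which relies on x being uniformly log-spaced.
def _demo_limited_backward_slope():
    x = np.array([100.0, 200.0, 400.0, 800.0])
    y = np.array([10.0, 10.0, 10.0, 0.0])
    limited, clipped, regions = limited_backward_slope(x, y, limit=4.0, start_index=3)
    assert np.allclose(limited, [10.0, 8.0, 4.0, 0.0])
    return limited, clipped, regions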
def limited_forward_slope(x, y, limit, start_index=0, peak_inds=None, limit_free_mask=None):
"""Limits forwards slope of a frequency response curve
Args:
x: frequencies
y: amplitudes
limit: maximum slope in dB / oct
start_index: Index where to start traversing, no limitations apply before this
peak_inds: Peak indexes. Regions will require to touch one of these if given.
limit_free_mask: Boolean mask for indices where limitation must not be applied
Returns:
limited: Limited curve
        clipped: Boolean mask for clipped indexes
regions: Clipped regions, one per row, 1st column is the start index, 2nd column is the end index (exclusive)
"""
if peak_inds is not None:
peak_inds = np.array(peak_inds)
limited = []
clipped = []
regions = []
for i in range(len(x)):
if i <= start_index:
# No clipping before start index
limited.append(y[i])
clipped.append(False)
continue
# Calculate slope and local limit
slope = log_log_gradient(x[i], x[i - 1], y[i], limited[-1])
        # Local limit is 25% of the limit between 8 kHz and 11.5 kHz
# TODO: limit 9 kHz notch 8 kHz to 11 kHz?
local_limit = limit / 4 if 8000 <= x[i] <= 11500 else limit
if slope > local_limit and (limit_free_mask is None or not limit_free_mask[i]):
# Slope between the two samples is greater than the local maximum slope, clip to the max
if not clipped[-1]:
# Start of clipped region
regions.append([i])
clipped.append(True)
# Add value with limited change
octaves = np.log(x[i] / x[i - 1]) / np.log(2)
limited.append(limited[-1] + local_limit * octaves)
else:
# Moderate slope, no need to limit
limited.append(y[i])
if clipped[-1]:
# Previous sample clipped but this one didn't, means it's the end of clipped region
# Add end index to the region
regions[-1].append(i + 1)
region_start = regions[-1][0]
if peak_inds is not None and not np.any(np.logical_and(peak_inds >= region_start, peak_inds < i)):
# None of the peak indices found in the current region, discard limitations
limited[region_start:i] = y[region_start:i]
clipped[region_start:i] = [False] * (i - region_start)
regions.pop()
clipped.append(False)
if len(regions) and len(regions[-1]) == 1:
regions[-1].append(len(x) - 1)
return np.array(limited), np.array(clipped), np.array(regions)
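# Illustrative sketch (not part of the original module): a 10 dB step over one
# octave is clipped to 4 dB/oct, so the limited curve climbs 4 dB per octave
# until it reaches the plateau, and the clipped samples are recorded as a region.
def _demo_limited_forward_slope():
    x = np.array([100.0, 200.0, 400.0, 800.0])
    y = np.array([0.0, 10.0, 10.0, 10.0])
    limited, clipped, regions = limited_forward_slope(x, y, limit=4.0)
    assert np.allclose(limited, [0.0, 4.0, 8.0, 10.0])
    assert clipped.tolist() == [False, True, True, False]
    return limited, clipped, regions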
def log_log_gradient(f0, f1, g0, g1):
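    """Returns the gradient between two frequency response points in dB per octave.
    For example, a rise of 6 dB over one octave (f1 == 2 * f0, g1 - g0 == 6) returns 6.0.
    """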
octaves = np.log(f1 / f0) / np.log(2)
gain = g1 - g0
return gain / octaves
def find_backward_start(y, peak_inds, notch_inds):
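    """Finds the index from which the backward slope-limiting pass should start.
    If the response ends in a rising peak, the start is pushed past that peak to where the curve falls back to the
    level of the last notch (or to the last sample if it never does); otherwise the last notch itself is used.
    """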
# Find starting index for the backward pass
if peak_inds[-1] > notch_inds[-1]:
        # Last extremum is a positive peak
        # Find the index on the right side of the peak where the curve falls back to the level of the last notch
backward_start = np.argwhere(y[peak_inds[-1]:] <= y[notch_inds[-1]])
if len(backward_start):
backward_start = backward_start[0, 0] + peak_inds[-1]
else:
backward_start = len(y) - 1
else:
        # Last extremum is a notch, start the backward pass there
backward_start = notch_inds[-1]
return backward_start
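# Illustrative sketch (not part of the original module) with hypothetical
# peak/notch indices: when the curve ends on a positive peak (index 3), the
# backward pass starts where the curve falls back to the last notch level
# (-2 dB at index 1); when the last extremum is a notch, it starts there.
def _demo_find_backward_start():
    y = np.array([0.0, -2.0, 0.0, 6.0, -2.5, -3.0])
    assert find_backward_start(y, np.array([3]), np.array([1])) == 4
    assert find_backward_start(y, np.array([3]), np.array([1, 5])) == 5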
| [
"[email protected]"
] | |
d35824f9c7bc1d9b5bdf9ab1821580ea4c94cc6e | 3d9640bf3fb1da00f2739424723fbf6d74b574c0 | /project/accounts/migrations/0002_auto_20200809_1536.py | 9bb3a316cc8f71e82a6646748c93e6a9df7fbec6 | [] | no_license | brahim024/django-user-auth | 265df0de18bdce4756c53c616ba097755175b519 | 41339d449f6d2abb07ab30e087234915ada59186 | refs/heads/master | 2022-12-22T06:13:02.973405 | 2020-09-26T00:08:58 | 2020-09-26T00:08:58 | 279,684,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 3.1 on 2020-08-09 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='phone',
field=models.CharField(blank=True, max_length=20, null=True),
),
]
| [
"[email protected]"
] | |
bba7005497f25a02a0b5b5133051bdc931a6245d | 64327166debec734cdbdceed673affc7ef806901 | /backend/test_expo_app_23831/settings.py | 267df1ccc5565082c44b00a4f948a140ec41ff5d | [] | no_license | crowdbotics-apps/test-expo-app-23831 | 85aa164045a6d6988b4507ecb316d72a1482db74 | 67c0d3990ecc85359f9897676d58dad00bcdd3e7 | refs/heads/master | 2023-02-16T00:18:19.859203 | 2021-01-14T00:33:41 | 2021-01-14T00:33:41 | 329,466,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,782 | py | """
Django settings for test_expo_app_23831 project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
env = environ.Env()
environ.Env.read_env(os.path.join(BASE_DIR, '.env'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=True)
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'test_expo_app_23831.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_expo_app_23831.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test_expo_app_23831',
'USER': 'test_expo_app_23831',
'PASSWORD': 'test_expo_app_23831',
'HOST': 'localhost',
'PORT': '5432',
}
}
if env.str('DATABASE_URL', default=None):
DATABASES = {
'default': env.db()
}
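# Example (illustrative) of the URL format env.db() reads from DATABASE_URL:
#   DATABASE_URL=postgres://username:password@host:5432/database_name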
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = 'none'  # allauth expects one of 'mandatory', 'optional' or 'none'
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = env.str('SENDGRID_USERNAME', '')
EMAIL_HOST_PASSWORD = env.str('SENDGRID_PASSWORD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Import local settings
try:
from .local_settings import *
INSTALLED_APPS += DEBUG_APPS
except (ImportError, NameError):  # no local_settings module, or no DEBUG_APPS defined in it
pass
| [
"[email protected]"
] | |
431e0d44cfd2d2914eee2be9382d559ed9d7fc01 | 4daff90cfce1f253a3d8b14583dc0a038d553ca5 | /PyMess/MAG/SaveAllDip.py | 0e4e30b2180fcae15b7a47f17ab897311e2a895b | [
"MIT"
] | permissive | mattkjames7/PyMess | 42d0119a91d130649b3c601889ef132e38facb4f | f2c68285a7845a24d98284e20ed4292ed5e58138 | refs/heads/master | 2021-06-28T14:43:32.748427 | 2020-10-27T10:27:52 | 2020-10-27T10:27:52 | 174,409,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | import numpy as np
from ._SaveDip import _SaveDip
from .DataAvailability import DataAvailability
def SaveAllDip(Minute=False,StartI=0,EndI=None):
'''
This procedure should save all magnetometer data rotated into
a coordinate system useful for studying waves, with components in
the poloidal, toroidal and parallel directions.
	Inputs:
		Minute: Set to True to use minute resolution data, or False for
			full time resolution data.
		StartI: Index within the list of available dates at which to
			start converting (useful for resuming an interrupted run).
		EndI: Index within the list of available dates at which to stop
			converting (exclusive). When None, all dates from StartI
			onwards are converted.
'''
dates = DataAvailability(Minute,Type='MSO')
nf = np.size(dates)
if EndI is None:
EndI = nf
for i in range(StartI,EndI):
print('Converting File {0} of {1} ({2})'.format(i+1,nf,dates[i]))
_SaveDip(dates[i],Minute)
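if __name__ == '__main__':
    # Illustrative invocation (not part of the original module): convert the
    # first five available dates of minute-resolution data. Assumes a
    # configured PyMess data directory with MSO magnetometer data available.
    SaveAllDip(Minute=True, StartI=0, EndI=5)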
| [
"[email protected]"
] | |
6a244720c1ec569ef5ad9d6d031558d08080eddc | 87f574548a321a668f325bc3d120a45366b0b76b | /booking/migrations/0029_auto_20151020_1527.py | a7ae5a2639c51692d05636dbc2675c9b1f1421d8 | [] | no_license | judy2k/pipsevents | 1d19fb4c07e4a94d285e6b633e6ae013da0d1efd | 88b6ca7bb64b0bbbbc66d85d2fa9e975b1bd3081 | refs/heads/master | 2021-01-14T11:11:26.616532 | 2016-10-07T20:47:39 | 2016-10-07T20:55:13 | 36,600,721 | 0 | 0 | null | 2015-05-31T11:51:14 | 2015-05-31T11:51:14 | null | UTF-8 | Python | false | false | 5,457 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django_extensions.db.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('booking', '0028_event_cancelled'),
]
operations = [
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
('extra_ticket_info', models.TextField(default='', blank=True)),
('extra_ticket_info1', models.TextField(default='', blank=True)),
],
),
migrations.CreateModel(
name='TicketBooking',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
('date_booked', models.DateTimeField(default=django.utils.timezone.now)),
('date_rebooked', models.DateTimeField(null=True, blank=True)),
('paid', models.BooleanField(default=False)),
('payment_confirmed', models.BooleanField(help_text='Payment confirmed by admin/organiser', default=False)),
('date_payment_confirmed', models.DateTimeField(null=True, blank=True)),
('cancelled', models.BooleanField(default=False)),
('reminder_sent', models.BooleanField(default=False)),
('warning_sent', models.BooleanField(default=False)),
('booking_reference', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='TicketedEvent',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
('name', models.CharField(max_length=255)),
('description', models.TextField(default='', blank=True)),
('date', models.DateTimeField()),
('location', models.CharField(max_length=255, default='Watermelon Studio')),
('max_tickets', models.PositiveIntegerField(help_text='Leave blank if no max number', null=True, blank=True)),
('contact_person', models.CharField(max_length=255, default='Gwen Burns')),
('contact_email', models.EmailField(max_length=254, default='[email protected]')),
('ticket_cost', models.DecimalField(default=0, decimal_places=2, max_digits=8)),
('advance_payment_required', models.BooleanField(default=True)),
('show_on_site', models.BooleanField(help_text='Tick to show on the site', default=True)),
('payment_open', models.BooleanField(default=True)),
('payment_info', models.TextField(blank=True)),
('payment_due_date', models.DateTimeField(help_text='Tickets that are not paid by the payment due date will be automatically cancelled (a warning email will be sent to users first).', null=True, blank=True)),
('payment_time_allowed', models.PositiveIntegerField(help_text='Number of hours allowed for payment after booking (after this ticket purchases will be cancelled. This will be ignored if there is a payment due date set on the event itself. ', null=True, blank=True)),
('email_studio_when_purchased', models.BooleanField(default=False)),
('max_ticket_purchase', models.PositiveIntegerField(help_text='Limit the number of tickets that can be purchased at one time', null=True, blank=True)),
('extra_ticket_info_label', models.CharField(max_length=255, default='', blank=True)),
('extra_ticket_info_help', models.CharField(help_text='Description/details/help text to display under the extra info field', max_length=255, default='', blank=True)),
('extra_ticket_info_required', models.BooleanField(help_text='Tick if this information is mandatory when booking tickets', default=False)),
('extra_ticket_info1_label', models.CharField(max_length=255, default='', blank=True)),
('extra_ticket_info1_help', models.CharField(help_text='Description/details/help text to display under the extra info field', max_length=255, default='', blank=True)),
('extra_ticket_info1_required', models.BooleanField(help_text='Tick if this information is mandatory when booking tickets', default=False)),
('slug', django_extensions.db.fields.AutoSlugField(max_length=40, unique=True, populate_from='name', blank=True, editable=False)),
],
options={
'ordering': ['-date'],
},
),
migrations.AddField(
model_name='ticketbooking',
name='ticketed_event',
field=models.ForeignKey(related_name='ticket_bookings', to='booking.TicketedEvent'),
),
migrations.AddField(
model_name='ticketbooking',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='ticket',
name='ticket_booking',
field=models.ForeignKey(related_name='tickets', to='booking.TicketBooking'),
),
]
| [
"[email protected]"
] |