Dataset schema (one row per column: name, dtype, observed range):

| column | dtype | observed values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable ⌀) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable ⌀) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable ⌀) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |

The example records below list these fields in this order, separated by `|`, with the `content` (source file) and `authors` fields rendered inline.
9df488729a33c40b7f0a79805a0e490939c392cc | de06c4a1fb02fd23eadfc58c770d87edfd0a6d38 | /group_buying/payment/migrations/0002_auto_20200829_0923.py | da6c064bd2bf89bfec813229cb49073801216a4d | []
| no_license | saisantoshchirag/group_buying | c12dc0bf1882cf03d20e6865dd98105a28907f80 | 1d9fb28f99dfb9b085e43bb5429bde476680ffa7 | refs/heads/master | 2023-08-15T01:27:56.203321 | 2020-11-13T11:16:36 | 2020-11-13T11:16:36 | 267,057,651 | 0 | 1 | null | 2021-09-22T19:21:23 | 2020-05-26T13:58:14 | HTML | UTF-8 | Python | false | false | 1,038 | py | # Generated by Django 2.1.5 on 2020-08-29 03:53
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('payment', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='orders',
            name='address',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='city',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='email',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='items_json',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='name',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='phone',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='state',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='zip_code',
        ),
    ]
| [
"[email protected]"
]
| |
538215aa0cc6b8084fff013b4fd1dac21131423c | 523f8f5febbbfeb6d42183f2bbeebc36f98eadb5 | /80_best.py | c500e3032fd796de2b2a3073cdc4baa3dbdbb67f | []
| no_license | saleed/LeetCode | 655f82fdfcc3000400f49388e97fc0560f356af0 | 48b43999fb7e2ed82d922e1f64ac76f8fabe4baa | refs/heads/master | 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | class Solution(object):
    def removeDuplicates(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        n = 0
        p = 0
        pre = float("inf")
        for i in range(len(nums)):
            if nums[i] == pre:
                if n == 2:
                    continue
                else:
                    n += 1
                    nums[p] = nums[i]
                    p += 1
            else:
                n = 1
                nums[p] = nums[i]
                p += 1
                pre = nums[i]
        return p
a=Solution()
test=[1,1,1,2,2,3]
print(a.removeDuplicates(test)) | [
"[email protected]"
]
| |
ce9a504baf33919b24dc53bdf46a87dc45cd164e | fa45fe7eaba7ef7c27ecf95db7c460ca189ce0d4 | /everydays/day002/flask_test/hm_07_helloflask.py | c5e844f1d49376ac75384f887e29197ae23fd8cb | []
| no_license | jake20001/Hello | be1a2bb5331f2ad4c1d8f30c6a9a530aff79e605 | 08217871bb17152eb09e68cd154937ebe5d59d2c | refs/heads/master | 2021-07-10T09:48:15.883716 | 2021-04-23T14:49:03 | 2021-04-23T14:49:03 | 56,282,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | from datetime import timedelta
from flask import Flask, session, jsonify

app = Flask(__name__)
# Set the application secret key; it is used to sign the session cookie
app.secret_key = 'test'

# Session lifetime defaults to 31 days
print(f'Default session lifetime: {app.permanent_session_lifetime}')
# Change the session lifetime by assigning a timedelta object
app.permanent_session_lifetime = timedelta(days=0, seconds=20)
print(f'Session lifetime under test: {app.permanent_session_lifetime}')


@app.route('/session')
def get_session():
    # session is a dict-like object
    print(session)
    return jsonify({key: value for key, value in session.items()})


@app.route('/session/set')
def set_session():
    # session is a dict-like object; assigning to or reading its keys writes and reads session data
    # store some session data
    session['username'] = 'zhangsan'
    session['age'] = 100
    return "set session"


@app.route('/session/delete')
def delete_session():
    # Use del to remove a session key, but first check that the key exists in the session;
    # without the check a missing key could raise an exception
    if 'username' in session:
        del session['username']
    return "delete session"
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True) | [
"[email protected]"
]
| |
c9979f423a456cb880b77c2b8a584ec0c5691070 | b007d88e6726452ffa8fe80300614f311ae5b318 | /educative.io/coding_patterns/hash_maps/isomorphic_string.py | 3f2177702a7f146a99345b2c40f7a05c9cd83761 | []
| no_license | jinurajan/Datastructures | ec332b12b8395f42cb769e771da3642f25ba7e7f | 647fea5d2c8122468a1c018c6829b1c08717d86a | refs/heads/master | 2023-07-06T14:42:55.168795 | 2023-07-04T13:23:22 | 2023-07-04T13:23:22 | 76,943,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,731 | py | """
Given two strings, check whether two strings are isomorphic to each other or not. Two strings are isomorphic if a fixed mapping exists from the characters of one string to the characters of the other string. For example, if there are two instances of the character "a" in the first string, both these instances should be converted to another character (which could also remain the same character if "a" is mapped to itself) in the second string. This converted character should remain the same in both positions of the second string since there is a fixed mapping from the character "a" in the first string to the converted character in the second string.
"""
def is_isomorphic(string1, string2):
    # Write your code here
    # your code will replace this placeholder return statement
    if len(string1) != len(string2):
        return False
    map_1 = {}  # str1 to str2 mapping
    map_2 = {}  # str2 to str1 mapping
    for i in range(len(string1)):
        char_1 = string1[i]
        char_2 = string2[i]
        if char_1 in map_1 and map_1[char_1] != char_2:
            return False
        if char_2 in map_2 and map_2[char_2] != char_1:
            return False
        map_1[char_1] = char_2
        map_2[char_2] = char_1
    return True


def is_isomorphic(string1, string2):
    # Write your code here
    # your code will replace this placeholder return statement
    if len(string1) != len(string2):
        return False
    map_1 = {}  # str1 to str2 mapping
    map_2 = {}  # str2 to str1 mapping
    for char_1, char_2 in zip(string1, string2):
        if char_1 in map_1 and map_1[char_1] != char_2:
            return False
        if char_2 in map_2 and map_2[char_2] != char_1:
            return False
        map_1[char_1] = char_2
        map_2[char_2] = char_1
    return True
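# Illustrative checks (not part of the original file): "egg"/"add" are isomorphic because
# e->a and g->d can be applied consistently in both directions, while "foo"/"bar" are not,
# since "o" would have to map to both "a" and "r".
assert is_isomorphic("egg", "add")
assert not is_isomorphic("foo", "bar")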
| [
"[email protected]"
]
| |
dc4ba9522892d2b29251cd8eab33b73c5fffbcf8 | 2d2c10ffa7aa5ee35393371e7f8c13b4fab94446 | /projects/ai/mrc/haihua/mrc_guwen/loss.py | b36544f5e4359d2393243aba18e0a179e657b745 | []
| no_license | faker2081/pikachu2 | bec83750a5ff3c7b5a26662000517df0f608c1c1 | 4f06d47c7bf79eb4e5a22648e088b3296dad3b2d | refs/heads/main | 2023-09-02T00:28:41.723277 | 2021-11-17T11:15:44 | 2021-11-17T11:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file loss.py
# \author chenghuige
# \date 2021-01-09 17:51:33.472128
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import tensorflow as tf
import melt as mt
from .config import *
def loss_fn(y_true, y_pred, x, model):
    pred = y_pred
    pred = tf.cast(pred, tf.float32)
    loss_func = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
    loss = loss_func(y_true, pred)
    loss = mt.reduce_over(loss)
    return loss


def get_loss(model=None):
    loss_fn_ = model.get_loss()
    # loss_fn_ = loss_fn
    # if not FLAGS.custom_loss:
    #     loss_fn_ = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    # else:
    #     loss_fn_ = model.get_loss()
    return loss_fn_
| [
"[email protected]"
]
| |
b95feeca262a9036432d30423ce62dd23cffdd32 | 415fcefe59c8d33bc3f8b0784d48a7509ea7d5da | /addanother_example/models.py | f005bc5f0ebd821cc308a8ef2e021933eecd6f68 | []
| no_license | asifpy/django-quickstart | 6f517699375015584a7d17f112b70b8eeff89762 | 0ff625915cf169d3fb2f9646d9838260629c1576 | refs/heads/master | 2021-01-11T11:19:22.446634 | 2017-05-04T05:28:55 | 2017-05-04T05:28:55 | 72,719,312 | 2 | 1 | null | 2017-05-04T05:28:56 | 2016-11-03T07:24:32 | Python | UTF-8 | Python | false | false | 795 | py | from django.db import models
class Team(models.Model):
    name = models.CharField(max_length=20)

    def __str__(self):
        return self.name


class Player(models.Model):
    name = models.CharField(max_length=20)
    current_team = models.ForeignKey(
        "Team", related_name="current_players",
        help_text='This demonstrates the wrapper adding an "add" button only'
    )
    future_team = models.ForeignKey(
        "Team", related_name="future_players",
        help_text='This demonstrates the wrapper adding both an "add" and an "edit" button'
    )
    previous_teams = models.ManyToManyField(
        "Team", related_name="ancient_players",
        help_text="This demonstrates the wrapper on a ManyToMany field"
    )

    def __str__(self):
        return self.name | [
"[email protected]"
]
| |
a511646d6604a9c524b484c4ff7546e7ca14116e | bc167f434158921bcf2c678155c5cdfec1c9b0c9 | /PI_code/simulator/behaviourGeneration/firstGenScripts_preyHunter/behav457.py | 245f22a2fc444ac2254832c1c88ff8828465938b | []
| no_license | s0217391/DifferentProjects | 6450efc89c64ecd21b86c705737e89e5c69433a6 | 7f4da153660817b6cbf72d2e823aa29c0c2f95a9 | refs/heads/master | 2021-01-17T02:58:46.219240 | 2015-05-26T22:45:46 | 2015-05-26T22:45:46 | 34,995,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | #!/usr/bin/python
import sys
def compute(prey):
    temp0 = -1 * prey[1]
    if temp0 != 0:
        temp1 = temp0 / temp0
    else:
        temp1 = temp0
    temp0 = -1 * prey[1]
    temp1 = temp0 * prey[0]
    if temp0 != 0:
        temp2 = temp1 % temp0
    else:
        temp2 = temp0
    temp3 = temp1 + prey[1]
    if temp2 > temp0:
        if temp3 != 0:
            temp3 = temp3 % temp3
        else:
            temp3 = temp3
    else:
        if temp2 > temp0:
            if temp3 != 0:
                temp3 = prey[0] % temp3
            else:
                temp3 = temp3
        else:
            temp3 = temp3 * prey[0]
    if temp3 != 0:
        temp1 = temp1 / temp3
    else:
        temp1 = temp3
    if prey[1] > temp3:
        temp1 = temp2 * temp2
    else:
        temp1 = prey[1] + prey[1]
    if temp0 != 0:
        temp1 = prey[1] / temp0
    else:
        temp1 = temp0
    temp0 = prey[0] + temp0
    temp2 = prey[0] + temp3
    temp4 = -1 * prey[1]
    if temp3 != 0:
        temp0 = temp1 % temp3
    else:
        temp0 = temp3
    temp4 = prey[0] + temp2
    temp3 = prey[1] + temp3
    temp1 = max(prey[1], temp3)
    temp2 = temp2 + prey[1]
    if temp1 > prey[1]:
        if prey[0] > prey[0]:
            temp0 = -1 * temp1
        else:
            temp0 = temp1 + prey[0]
    else:
        if prey[1] != 0:
            temp0 = temp0 / prey[1]
        else:
            temp0 = prey[1]
    if temp3 != 0:
        temp5 = prey[1] / temp3
    else:
        temp5 = temp3
    return [prey[1], temp5]
| [
"[email protected]"
]
| |
a5c866848db0a2d103e4eccf93def3588d598874 | f20da8440bae10fe73900f787fc7781f23196325 | /downsample/downsample_dense.py | ad5654289ac0181edcac53448c9e825628577396 | []
| no_license | ramesh720/recipe_zs2017_track2_phoneme | 9c5cdb3066a84e5059153b1390802e700c66978e | f8bbd9b8e6ae4f542e52c2582eab1cf166923226 | refs/heads/master | 2020-04-29T11:07:47.406768 | 2018-01-13T13:03:46 | 2018-01-13T13:03:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,364 | py | #!/usr/bin/env python
"""
Perform dense downsampling over indicated segmentation intervals.
Author: Herman Kamper
Contact: [email protected]
Date: 2015-2017
"""
from datetime import datetime
from os import path
import argparse
import cPickle as pickle
import numpy as np
import scipy.signal as signal
import sys
OUTPUT_DIR = "embeddings"
#-----------------------------------------------------------------------------#
#                             UTILITY FUNCTIONS                                #
#-----------------------------------------------------------------------------#

def check_argv():
    """Check the command line arguments."""
    parser = argparse.ArgumentParser(description=__doc__.strip().split("\n")[0], add_help=False)
    parser.add_argument("lang", type=str, choices=["english", "french", "mandarin", "LANG1", "LANG2"])
    parser.add_argument("subset", type=str, choices=["train"])  # , "test"])
    # parser.add_argument("landmarks", type=str, choices=["gtphone", "unsup_syl"], help="landmarks set")
    parser.add_argument("landmarks", type=str, choices=["unsup_syl"], help="landmarks set")
    parser.add_argument(
        # "feature_type", type=str, help="input feature type", choices=["mfcc", "cae.d_10", "cae.d_13"]
        "feature_type", type=str, help="input feature type", choices=["mfcc", "okko0"]
    )
    parser.add_argument("--n", type=int, help="number of samples (default: %(default)s)", default=10)
    parser.add_argument(
        "--frame_dims", type=int, default=None,
        help="only keep these number of dimensions"
    )
    parser.add_argument(
        "--n_landmarks_max", type=int,
        help="maximum number of landmarks to cross (default: %(default)s)", default=6
    )
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def downsample_utterance(features, seglist, n):
    """
    Return the downsampled matrix with each row an embedding for a segment in
    the seglist.
    """
    embeddings = []
    for i, j in seglist:
        y = features[i:j+1, :].T
        y_new = signal.resample(y, n, axis=1).flatten("C")
        embeddings.append(y_new)
    return np.asarray(embeddings)


#-----------------------------------------------------------------------------#
#                                MAIN FUNCTION                                 #
#-----------------------------------------------------------------------------#

def main():
    args = check_argv()

    if args.feature_type == "mfcc":
        input_npz_fn = path.join(
            "..", "features", "mfcc", args.lang + "_" + args.subset, "numpy", "mfcc.cmvn_dd.npz"
        )
    elif args.feature_type == "okko0":
        input_npz_fn = path.join(
            "..", "features", "okko0", args.lang + "_" + args.subset, "segments.npz"
        )
    else:
        assert False
    print("Reading: " + input_npz_fn)
    input_npz = np.load(input_npz_fn)
    d_frame = input_npz[input_npz.keys()[0]].shape[1]
    print("No. of utterances: " + str(len(input_npz.keys())))

    seglist_pickle_fn = path.join(
        OUTPUT_DIR, args.lang + "_" + args.subset, "seglist." + args.landmarks
        + ".n_max_" + str(args.n_landmarks_max) + ".pkl"
    )
    print("Reading: " + seglist_pickle_fn)
    with open(seglist_pickle_fn, "rb") as f:
        seglist_dict = pickle.load(f)
    print("No. of utterances: " + str(len(seglist_dict)))

    print("Frame dimensionality: " + str(d_frame))
    if args.frame_dims is not None and args.frame_dims < d_frame:
        d_frame = args.frame_dims
        print("Reducing frame dimensionality: " + str(d_frame))
    print("No. of samples: " + str(args.n))

    print(datetime.now())
    print("Downsampling")
    downsample_dict = {}
    for i, utt in enumerate(input_npz.keys()):
        downsample_dict[utt] = downsample_utterance(
            input_npz[utt][:, :args.frame_dims], seglist_dict[utt], args.n
        )
    print(datetime.now())

    output_npz_fn = path.join(
        OUTPUT_DIR, args.lang + "_" + args.subset, "downsample_dense." + args.feature_type +
        ".n_" + str(args.n) + ".n_max_" + str(args.n_landmarks_max) + "." + args.landmarks + ".npz"
    )
    print("Writing: " + output_npz_fn)
    np.savez_compressed(output_npz_fn, **downsample_dict)


if __name__ == "__main__":
    main()
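# Illustrative invocation (not part of the original file; assumes the ../features/mfcc data
# layout expected above already exists):
#   python downsample_dense.py english train unsup_syl mfcc --n 10 --n_landmarks_max 6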
| [
"[email protected]"
]
| |
0f149d04b001516f7e44891758e4f4b9fe1459e9 | 88d555a009f9075e59177fac70036892f397b439 | /bin/saluki_train_folds.py | b0855e380c8a5fecbcf452fe9356988f2c5c8f01 | [
"Apache-2.0"
]
| permissive | calico/basenji | f9f406971d355dda81821dcf274696a7d27e332d | 615b9eec8a591783b16d959029ddad08edae853d | refs/heads/master | 2023-09-04T11:14:15.620786 | 2023-07-27T00:05:13 | 2023-07-27T00:05:13 | 96,346,574 | 326 | 143 | Apache-2.0 | 2023-08-16T00:36:32 | 2017-07-05T17:54:18 | Python | UTF-8 | Python | false | false | 13,420 | py | #!/usr/bin/env python
# Copyright 2019 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser, OptionGroup
import copy
import glob
import json
from natsort import natsorted
import os
import pdb
import pickle
import shutil
import subprocess
import sys
import numpy as np
import pandas as pd
import slurm
"""
saluki_train_folds.py
Train Saluki model replicates on cross folds using given parameters and data.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <data1_dir> ...'
parser = OptionParser(usage)
# train
train_options = OptionGroup(parser, 'saluki_train.py options')
train_options.add_option('-o', dest='out_dir',
default='train_out',
help='Output directory for test statistics [Default: %default]')
parser.add_option_group(train_options)
# test
test_options = OptionGroup(parser, 'saluki_test.py options')
test_options.add_option('--shifts', dest='shifts',
default='0', type='str',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option_group(test_options)
# multi
rep_options = OptionGroup(parser, 'replication options')
rep_options.add_option('-c', dest='crosses',
default=1, type='int',
help='Number of cross-fold rounds [Default:%default]')
rep_options.add_option('-e', dest='conda_env',
default='tf28',
help='Anaconda environment [Default: %default]')
rep_options.add_option('-f', dest='fold_subset',
default=None, type='int',
help='Run a subset of folds [Default:%default]')
rep_options.add_option('--name', dest='name',
default='fold', help='SLURM name prefix [Default: %default]')
rep_options.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script')
rep_options.add_option('-q', dest='queue',
default='geforce',
help='SLURM queue on which to run the jobs [Default: %default]')
rep_options.add_option('-r', dest='restart',
default=False, action='store_true')
rep_options.add_option('--test_off', dest='test_off',
default=False, action='store_true')
rep_options.add_option('--test_train_off', dest='test_train_off',
default=False, action='store_true')
parser.add_option_group(rep_options)
(options, args) = parser.parse_args()
if len(args) < 2:
parser.error('Must provide parameters and data directory.')
else:
params_file = os.path.abspath(args[0])
data_dirs = [os.path.abspath(arg) for arg in args[1:]]
# read model parameters
with open(params_file) as params_open:
params = json.load(params_open)
params_train = params['train']
#######################################################
# prep work
if not options.restart and os.path.isdir(options.out_dir):
print('Output directory %s exists. Please remove.' % options.out_dir)
exit(1)
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
# read data parameters
num_data = len(data_dirs)
data_stats_file = '%s/statistics.json' % data_dirs[0]
with open(data_stats_file) as data_stats_open:
data_stats = json.load(data_stats_open)
# count folds
num_folds = len([dkey for dkey in data_stats if dkey.startswith('fold')])
# subset folds
if options.fold_subset is not None:
num_folds = min(options.fold_subset, num_folds)
# arrange data
for ci in range(options.crosses):
for fi in range(num_folds):
rep_dir = '%s/f%dc%d' % (options.out_dir, fi, ci)
os.makedirs(rep_dir, exist_ok=True)
# make data directories
for di in range(num_data):
rep_data_dir = '%s/data%d' % (rep_dir, di)
if not os.path.isdir(rep_data_dir):
make_rep_data(data_dirs[di], rep_data_dir, fi, ci)
#######################################################
# train
jobs = []
for ci in range(options.crosses):
for fi in range(num_folds):
rep_dir = '%s/f%dc%d' % (options.out_dir, fi, ci)
if options.restart and os.path.isdir('%s/train'%rep_dir):
print('%s found and skipped.' % rep_dir)
else:
# collect data directories
rep_data_dirs = []
for di in range(num_data):
rep_data_dirs.append('%s/data%d' % (rep_dir, di))
# train command
cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
cmd += ' conda activate %s;' % options.conda_env
cmd += ' echo $HOSTNAME;'
cmd += ' saluki_train.py'
cmd += ' %s' % options_string(options, train_options, rep_dir)
cmd += ' %s %s' % (params_file, ' '.join(rep_data_dirs))
name = '%s-train-f%dc%d' % (options.name, fi, ci)
sbf = os.path.abspath('%s/train.sb' % rep_dir)
outf = os.path.abspath('%s/train.out' % rep_dir)
errf = os.path.abspath('%s/train.err' % rep_dir)
j = slurm.Job(cmd, name,
outf, errf, sbf,
queue=options.queue,
cpu=4,
gpu=params_train.get('num_gpu',1),
mem=30000, time='2-0:0:0')
jobs.append(j)
slurm.multi_run(jobs, max_proc=options.processes, verbose=True,
launch_sleep=10, update_sleep=60)
#######################################################
# test train
jobs = []
if not options.test_train_off:
for ci in range(options.crosses):
for fi in range(num_folds):
it_dir = '%s/f%dc%d' % (options.out_dir, fi, ci)
for di in range(num_data):
if num_data == 1:
out_dir = '%s/test_train' % it_dir
model_file = '%s/train/model_best.h5' % it_dir
else:
out_dir = '%s/test%d_train' % (it_dir, di)
model_file = '%s/train/model%d_best.h5' % (it_dir, di)
# check if done
acc_file = '%s/acc.txt' % out_dir
if os.path.isfile(acc_file):
print('%s already generated.' % acc_file)
else:
# basenji test
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % options.conda_env
basenji_cmd += ' saluki_test.py'
basenji_cmd += ' --head %d' % di
basenji_cmd += ' -o %s' % out_dir
if options.shifts:
basenji_cmd += ' --shifts %s' % options.shifts
basenji_cmd += ' --split train'
basenji_cmd += ' %s' % params_file
basenji_cmd += ' %s' % model_file
basenji_cmd += ' %s/data%d' % (it_dir, di)
name = '%s-testtr-f%dc%d' % (options.name, fi, ci)
basenji_job = slurm.Job(basenji_cmd,
name=name,
out_file='%s.out'%out_dir,
err_file='%s.err'%out_dir,
queue=options.queue,
cpu=2, gpu=1,
mem=23000,
time='8:00:00')
jobs.append(basenji_job)
#######################################################
# test best
if not options.test_off:
for ci in range(options.crosses):
for fi in range(num_folds):
it_dir = '%s/f%dc%d' % (options.out_dir, fi, ci)
for di in range(num_data):
if num_data == 1:
out_dir = '%s/test' % it_dir
model_file = '%s/train/model_best.h5' % it_dir
else:
out_dir = '%s/test%d' % (it_dir, di)
model_file = '%s/train/model%d_best.h5' % (it_dir, di)
# check if done
acc_file = '%s/acc.txt' % out_dir
if os.path.isfile(acc_file):
print('%s already generated.' % acc_file)
else:
# basenji test
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % options.conda_env
basenji_cmd += ' saluki_test.py'
basenji_cmd += ' --head %d' % di
# TEMP
basenji_cmd += ' --save'
basenji_cmd += ' -o %s' % out_dir
if options.shifts:
basenji_cmd += ' --shifts %s' % options.shifts
basenji_cmd += ' %s' % params_file
basenji_cmd += ' %s' % model_file
basenji_cmd += ' %s/data%d' % (it_dir, di)
name = '%s-test-f%dc%d' % (options.name, fi, ci)
basenji_job = slurm.Job(basenji_cmd,
name=name,
out_file='%s.out'%out_dir,
err_file='%s.err'%out_dir,
queue=options.queue,
cpu=2, gpu=1,
mem=23000,
time='4:00:00')
jobs.append(basenji_job)
slurm.multi_run(jobs, max_proc=options.processes, verbose=True,
launch_sleep=10, update_sleep=60)
def make_rep_data(data_dir, rep_data_dir, fi, ci):
# read data parameters
data_stats_file = '%s/statistics.json' % data_dir
with open(data_stats_file) as data_stats_open:
data_stats = json.load(data_stats_open)
# sequences per fold
fold_seqs = []
dfi = 0
while 'fold%d_seqs'%dfi in data_stats:
fold_seqs.append(data_stats['fold%d_seqs'%dfi])
del data_stats['fold%d_seqs'%dfi]
dfi += 1
num_folds = dfi
# split folds into train/valid/test
test_fold = fi
valid_fold = (fi+1+ci) % num_folds
train_folds = [fold for fold in range(num_folds) if fold not in [valid_fold,test_fold]]
# clear existing directory
if os.path.isdir(rep_data_dir):
shutil.rmtree(rep_data_dir)
# make data directory
os.mkdir(rep_data_dir)
# dump data stats
data_stats['test_seqs'] = fold_seqs[test_fold]
data_stats['valid_seqs'] = fold_seqs[valid_fold]
data_stats['train_seqs'] = sum([fold_seqs[tf] for tf in train_folds])
with open('%s/statistics.json'%rep_data_dir, 'w') as data_stats_open:
json.dump(data_stats, data_stats_open, indent=4)
# genes table
genes_df = pd.read_csv('%s/genes.tsv' % data_dir, sep='\t', index_col=0)
gene_split = np.array(['train']*genes_df.shape[0])
gene_split[genes_df.Fold==test_fold] = 'test'
gene_split[genes_df.Fold==valid_fold] = 'valid'
genes_df['Split'] = gene_split
genes_df.to_csv('%s/genes.tsv'%rep_data_dir, sep='\t')
# copy targets
shutil.copy('%s/targets.txt'%data_dir, '%s/targets.txt'%rep_data_dir)
# sym link tfrecords
rep_tfr_dir = '%s/tfrecords' % rep_data_dir
os.mkdir(rep_tfr_dir)
# test tfrecords
ti = 0
test_tfrs = natsorted(glob.glob('%s/tfrecords/fold%d-*.tfr' % (data_dir, test_fold)))
for test_tfr in test_tfrs:
test_tfr = os.path.abspath(test_tfr)
test_rep_tfr = '%s/test-%d.tfr' % (rep_tfr_dir, ti)
os.symlink(test_tfr, test_rep_tfr)
ti += 1
# valid tfrecords
ti = 0
valid_tfrs = natsorted(glob.glob('%s/tfrecords/fold%d-*.tfr' % (data_dir, valid_fold)))
for valid_tfr in valid_tfrs:
valid_tfr = os.path.abspath(valid_tfr)
valid_rep_tfr = '%s/valid-%d.tfr' % (rep_tfr_dir, ti)
os.symlink(valid_tfr, valid_rep_tfr)
ti += 1
# train tfrecords
ti = 0
train_tfrs = []
for tfi in train_folds:
train_tfrs += natsorted(glob.glob('%s/tfrecords/fold%d-*.tfr' % (data_dir, tfi)))
for train_tfr in train_tfrs:
train_tfr = os.path.abspath(train_tfr)
train_rep_tfr = '%s/train-%d.tfr' % (rep_tfr_dir, ti)
os.symlink(train_tfr, train_rep_tfr)
ti += 1
def options_string(options, train_options, rep_dir):
options_str = ''
for opt in train_options.option_list:
opt_str = opt.get_opt_string()
opt_value = options.__dict__[opt.dest]
# wrap askeriks in ""
if type(opt_value) == str and opt_value.find('*') != -1:
opt_value = '"%s"' % opt_value
# no value for bools
elif type(opt_value) == bool:
if not opt_value:
opt_str = ''
opt_value = ''
# skip Nones
elif opt_value is None:
opt_str = ''
opt_value = ''
# modify
elif opt.dest == 'out_dir':
opt_value = '%s/train' % rep_dir
# find matching restore
elif opt.dest == 'restore':
fold_dir_mid = rep_dir.split('/')[-1]
if options.trunk:
opt_value = '%s/%s/train/model_trunk.h5' % (opt_value, fold_dir_mid)
else:
opt_value = '%s/%s/train/model_best.h5' % (opt_value, fold_dir_mid)
options_str += ' %s %s' % (opt_str, opt_value)
return options_str
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
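# Illustrative invocation (not part of the original file; paths are placeholders, and the
# flags are the ones defined by the OptionParser above):
#   saluki_train_folds.py -o train_out -c 1 -f 4 -q geforce params.json data1_dir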
| [
"[email protected]"
]
| |
e0cef2e9484c65cdeaf1980ebb7c8d939eeb49b2 | 738b4fd5d8ebb8c424947a6786bd41ba30df46d6 | /ibeatles/fitting/fitting_launcher.py | 4d66c124f6dace8967669f0f642adedd8f81d6c0 | [
"MIT"
]
| permissive | indudhiman/bragg-edge | ba6e5c02e2bf2c2c5f87b626a4578238f7973e43 | 56af0a448534ef9cb5428879ba900e194dc05db2 | refs/heads/master | 2020-04-16T22:49:53.274903 | 2019-01-08T14:18:32 | 2019-01-08T14:18:32 | 165,985,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,018 | py | try:
import PyQt4
import PyQt4.QtGui as QtGui
import PyQt4.QtCore as QtCore
from PyQt4.QtGui import QMainWindow
from PyQt4.QtGui import QApplication
except:
import PyQt5
import PyQt5.QtGui as QtGui
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QApplication
from pyqtgraph.dockarea import *
import pyqtgraph as pg
import numpy as np
from ibeatles.interfaces.ui_fittingWindow import Ui_MainWindow as UiMainWindow
from ibeatles.utilities.colors import pen_color
from ibeatles.utilities.array_utilities import find_nearest_index
from ibeatles.fitting.fitting_handler import FittingHandler
from ibeatles.fitting.value_table_handler import ValueTableHandler
from ibeatles.fitting.selected_bin_handler import SelectedBinsHandler
from ibeatles.table_dictionary.table_dictionary_handler import TableDictionaryHandler
from ibeatles.fitting.filling_table_handler import FillingTableHandler
from ibeatles.fitting.fitting_initialization_handler import FittingInitializationHandler
from ibeatles.fitting.create_fitting_story_launcher import CreateFittingStoryLauncher
class FittingLauncher(object):
def __init__(self, parent=None):
self.parent = parent
if self.parent.fitting_ui == None:
fitting_window = FittingWindow(parent=parent)
fitting_window.show()
self.parent.fitting_ui = fitting_window
o_fitting = FittingHandler(parent=self.parent)
o_fitting.display_image()
o_fitting.display_roi()
o_fitting.fill_table()
fitting_window.check_advanced_table_status()
else:
self.parent.fitting_ui.setFocus()
self.parent.fitting_ui.activateWindow()
class FittingWindow(QMainWindow):
data = []
there_is_a_roi = False
bragg_edge_active_button_status = True # to make sure active/lock button worked correctly
list_bins_selected_item = []
list_bins_locked_item = []
image_view = None
bragg_edge_plot = None
line_view = None
line_view_fitting = None #roi selected in binning window
all_bins_button = None
indi_bins_button = None
header_value_tables_match = {0: [0],
1: [1],
2: [2],
3: [3],
4: [4],
5: [5,6],
6: [7,8],
7: [9,10],
8: [11,12],
9: [13,14],
10: [15,16],
11: [17,18],
12: [19,20]}
para_cell_width = 110
header_table_columns_width = [30, 30, 50,50,100,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width]
fitting_table_columns_width = [header_table_columns_width[0],
header_table_columns_width[1],
header_table_columns_width[2],
header_table_columns_width[3],
header_table_columns_width[4],
np.int(header_table_columns_width[5]/2),
np.int(header_table_columns_width[5]/2),
np.int(header_table_columns_width[6]/2),
np.int(header_table_columns_width[6]/2),
np.int(header_table_columns_width[7]/2),
np.int(header_table_columns_width[7]/2),
np.int(header_table_columns_width[8]/2),
np.int(header_table_columns_width[8]/2),
np.int(header_table_columns_width[9]/2),
np.int(header_table_columns_width[9]/2),
np.int(header_table_columns_width[10]/2),
np.int(header_table_columns_width[10]/2),
np.int(header_table_columns_width[11]/2),
np.int(header_table_columns_width[11]/2),
np.int(header_table_columns_width[12]/2),
np.int(header_table_columns_width[12]/2)]
# status of alpha and sigma initialization
sigma_alpha_initialized = False
initialization_table = {'d_spacing': np.NaN,
'alpha': np.NaN,
'sigma': np.NaN,
'a1': np.NaN,
'a2': np.NaN,
'a5': np.NaN,
'a6': np.NaN}
bragg_edge_data = {'x_axis': [],
'y_axis': []}
def __init__(self, parent=None):
self.parent = parent
QMainWindow.__init__(self, parent=parent)
self.ui = UiMainWindow()
self.ui.setupUi(self)
self.setWindowTitle("5. Fitting")
self.init_pyqtgraph()
self.init_labels()
self.init_widgets()
self.init_table_behavior()
self.check_status_widgets()
def re_fill_table(self):
o_fitting = FittingHandler(parent=self.parent)
o_fitting.fill_table()
def init_table_behavior(self):
for _column, _width in enumerate(self.header_table_columns_width):
self.ui.header_table.setColumnWidth(_column, _width)
for _column, _width in enumerate(self.fitting_table_columns_width):
self.ui.value_table.setColumnWidth(_column, _width)
self.hori_header_table = self.ui.header_table.horizontalHeader()
self.hori_value_table = self.ui.value_table.horizontalHeader()
self.hori_header_table.sectionResized.connect(self.resizing_header_table)
self.hori_value_table.sectionResized.connect(self.resizing_value_table)
self.hori_header_table.sectionClicked.connect(self.column_header_table_clicked)
self.hori_value_table.sectionClicked.connect(self.column_value_table_clicked)
def column_value_table_clicked(self, column):
'''
to make sure that if the val or err column is selected, or unselected, the other
column behave the same
'''
if column < 5:
return
_item0 = self.parent.fitting_ui.ui.value_table.item(0, column)
state_column_clicked = self.parent.fitting_ui.ui.value_table.isItemSelected(_item0)
if column % 2 == 0:
col1 = column-1
col2 = column
else:
col1 = column
col2 = column+1
nbr_row = self.parent.fitting_ui.ui.value_table.rowCount()
range_selected = QtGui.QTableWidgetSelectionRange(0, col1, nbr_row-1, col2)
self.parent.fitting_ui.ui.value_table.setRangeSelected(range_selected,
state_column_clicked)
def column_header_table_clicked(self, column):
_value_table_column = self.header_value_tables_match.get(column, -1)
nbr_row = self.parent.fitting_ui.ui.value_table.rowCount()
# if both col already selected, unselect them
col_already_selected = False
_item1 = self.parent.fitting_ui.ui.value_table.item(0, _value_table_column[0])
_item2 = self.parent.fitting_ui.ui.value_table.item(0, _value_table_column[-1])
if self.parent.fitting_ui.ui.value_table.isItemSelected(_item1) and \
self.parent.fitting_ui.ui.value_table.isItemSelected(_item2):
col_already_selected = True
if column in [2,3]:
selection = self.parent.fitting_ui.ui.value_table.selectedRanges()
col_already_selected = False
for _select in selection:
if column in [_select.leftColumn(), _select.rightColumn()]:
col_already_selected = True
break
from_col = _value_table_column[0]
to_col = _value_table_column[-1]
range_selected = QtGui.QTableWidgetSelectionRange(0, from_col,
nbr_row-1, to_col)
self.parent.fitting_ui.ui.value_table.setRangeSelected(range_selected,
not col_already_selected)
def resizing_header_table(self, index_column, old_size, new_size):
if index_column < 5:
self.ui.value_table.setColumnWidth(index_column, new_size)
else:
new_half_size = np.int(new_size/2)
index1 = (index_column - 5) * 2 + 5
index2 = index1+1
self.ui.value_table.setColumnWidth(index1, new_half_size)
self.ui.value_table.setColumnWidth(index2, new_half_size)
def resizing_value_table(self, index_column, old_size, new_size):
if index_column < 5:
self.ui.header_table.setColumnWidth(index_column, new_size)
else:
if (index_column % 2) == 1:
right_new_size = self.ui.value_table.columnWidth(index_column + 1)
index_header = np.int(index_column - 5) / 2 + 5
self.ui.header_table.setColumnWidth(index_header, new_size + right_new_size)
else:
left_new_size = self.ui.value_table.columnWidth(index_column - 1)
index_header = np.int(index_column - 6) / 2 + 5
self.ui.header_table.setColumnWidth(index_header, new_size + left_new_size)
def init_widgets(self):
'''
such as material h,k,l list according to material selected in normalized tab
'''
hkl_list = self.parent.selected_element_hkl_array
str_hkl_list = ["{},{},{}".format(_hkl[0], _hkl[1], _hkl[2]) for _hkl in hkl_list]
self.ui.hkl_list_ui.addItems(str_hkl_list)
def check_status_widgets(self):
if (len(self.parent.data_metadata['normalized']['data_live_selection']) > 0) and \
not (self.parent.binning_line_view['pos'] is None):
status = True
else:
status = False
self.ui.instructions_step1_button.setEnabled(status)
def init_labels(self):
self.ui.lambda_min_label.setText(u"\u03BB<sub>min</sub>")
self.ui.lambda_max_label.setText(u"\u03BB<sub>max</sub>")
self.ui.lambda_min_units.setText(u"\u212B")
self.ui.lambda_max_units.setText(u"\u212B")
self.ui.bragg_edge_units.setText(u"\u212B")
self.ui.material_groupBox.setTitle(self.parent.selected_element_name)
def init_pyqtgraph(self):
if (len(self.parent.data_metadata['normalized']['data_live_selection']) > 0) and \
not (self.parent.binning_line_view['pos'] is None):
status = True
else:
status = False
area = DockArea()
self.ui.area = area
area.setVisible(status)
d1 = Dock("Image Preview", size=(200, 300))
d2 = Dock("Bragg Edge", size=(200, 100))
area.addDock(d1, 'top')
area.addDock(d2, 'bottom')
preview_widget = pg.GraphicsLayoutWidget()
pg.setConfigOptions(antialias=True) # this improve display
vertical_layout = QtGui.QVBoxLayout()
preview_widget.setLayout(vertical_layout)
# image view (top plot)
image_view = pg.ImageView()
image_view.ui.roiBtn.hide()
image_view.ui.menuBtn.hide()
self.image_view = image_view
image_view.scene.sigMouseMoved.connect(self.mouse_moved_in_image_view)
top_widget = QtGui.QWidget()
vertical = QtGui.QVBoxLayout()
vertical.addWidget(image_view)
# bin transparency
transparency_layout = QtGui.QHBoxLayout()
spacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
transparency_layout.addItem(spacer)
label = QtGui.QLabel("Bin Transparency")
transparency_layout.addWidget(label)
slider = QtGui.QSlider(QtCore.Qt.Horizontal)
slider.setMaximum(100)
slider.setMinimum(0)
slider.setValue(50)
slider.valueChanged.connect(self.slider_changed)
self.slider = slider
transparency_layout.addWidget(slider)
bottom_widget = QtGui.QWidget()
bottom_widget.setLayout(transparency_layout)
top_widget.setLayout(vertical)
d1.addWidget(top_widget)
d1.addWidget(bottom_widget)
# bragg edge plot (bottom plot)
bragg_edge_plot = pg.PlotWidget(title='')
bragg_edge_plot.plot()
self.bragg_edge_plot = bragg_edge_plot
# plot all or individual bins
buttons_layout = QtGui.QHBoxLayout()
spacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
buttons_layout.addItem(spacer)
label = QtGui.QLabel("Plot")
label.setEnabled(False)
buttons_layout.addWidget(label)
# all bins button
active_button = QtGui.QRadioButton()
active_button.setText("Active Bins")
active_button.setChecked(True)
#active_button.setEnabled(False)
active_button.pressed.connect(self.active_button_pressed)
self.ui.active_bins_button = active_button
# indi bin button
buttons_layout.addWidget(active_button)
locked_button = QtGui.QRadioButton()
locked_button.setText("Locked Bins")
locked_button.setChecked(False)
#locked_button.setEnabled(False)
locked_button.pressed.connect(self.lock_button_pressed)
self.ui.locked_bins_button = locked_button
buttons_layout.addWidget(locked_button)
bottom_widget = QtGui.QWidget()
bottom_widget.setLayout(buttons_layout)
d2.addWidget(bragg_edge_plot)
d2.addWidget(bottom_widget)
vertical_layout.addWidget(area)
self.ui.widget.setLayout(vertical_layout)
def active_button_pressed(self):
self.bragg_edge_active_button_status = True
self.update_bragg_edge_plot()
def lock_button_pressed(self):
self.bragg_edge_active_button_status = False
self.update_bragg_edge_plot()
def mouse_moved_in_image_view(self):
self.image_view.setFocus(True)
def hkl_list_changed(self, hkl):
bragg_edges_array = self.parent.selected_element_bragg_edges_array
if bragg_edges_array:
if str(hkl) == '':
value = "N/A"
else:
hkl_array = self.parent.selected_element_hkl_array
str_hkl_list = ["{},{},{}".format(_hkl[0], _hkl[1], _hkl[2]) for _hkl in hkl_array]
hkl_bragg_edges = dict(zip(str_hkl_list, bragg_edges_array))
value = "{:04.3f}".format(hkl_bragg_edges[str(hkl)])
else:
value = "N/A"
self.ui.bragg_edge_calculated.setText(value)
def slider_changed(self):
o_fitting_handler = FittingHandler(parent=self.parent)
o_fitting_handler.display_roi()
def active_button_state_changed(self, status, row_clicked):
'''
status: 0: off
2: on
'''
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
update_lock_flag = False
if self.parent.advanced_selection_ui:
self.parent.advanced_selection_ui.ui.selection_table.blockSignals(True)
if status == 0:
status = False
else:
status = True
# perform same status on all rows
_selection = self.ui.value_table.selectedRanges()
_this_column_is_selected = False
for _select in _selection:
if 3 in [_select.leftColumn(), _select.rightColumn()]:
_this_column_is_selected = True
break
table_dictionary = self.parent.table_dictionary
if _this_column_is_selected:
update_selection_flag = True #we change the state so we need to update the selection
for _index in table_dictionary:
table_dictionary[_index]['active'] = status
_widget_lock = self.ui.value_table.cellWidget(int(_index), 3)
_widget_lock.blockSignals(True)
_widget_lock.setChecked(status)
_widget_lock.blockSignals(False)
if status:
_widget = self.ui.value_table.cellWidget(int(_index), 2)
if _widget.isChecked(): # because we can not be active and locked at the same time
table_dictionary[_index]['lock'] = False
_widget.blockSignals(True)
_widget.setChecked(False)
_widget.blockSignals(False)
else:
table_dictionary[str(row_clicked)]['active'] = status
if status:
_widget = self.ui.value_table.cellWidget(row_clicked, 2)
if _widget.isChecked():
table_dictionary[str(row_clicked)]['lock'] = False
_widget.blockSignals(True)
_widget.setChecked(False)
_widget.blockSignals(False)
update_lock_flag = True
self.parent.table_dictionary = table_dictionary
# hide this row if status is False and user only wants to see locked items
o_filling_handler = FillingTableHandler(parent = self.parent)
if (status == False) and (o_filling_handler.get_row_to_show_state() == 'active'):
self.parent.fitting_ui.ui.value_table.hideRow(row_clicked)
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bins_selected()
self.update_bragg_edge_plot()
o_bin_handler.update_bins_locked()
if self.parent.advanced_selection_ui:
self.parent.advanced_selection_ui.update_selection_table()
if update_lock_flag:
self.parent.advanced_selection_ui.update_lock_table()
self.parent.advanced_selection_ui.ui.selection_table.blockSignals(False)
QApplication.restoreOverrideCursor()
def lock_button_state_changed(self, status, row_clicked):
'''
status: 0: off
2: on
we also need to make sure that if the button is lock, it can not be activated !
'''
update_selection_flag = False
if self.parent.advanced_selection_ui:
self.parent.advanced_selection_ui.ui.lock_table.blockSignals(True)
if status == 0:
status = False
else:
status = True
# perform same status on all rows
_selection = self.ui.value_table.selectedRanges()
_this_column_is_selected = False
for _select in _selection:
if 2 in [_select.leftColumn(), _select.rightColumn()]:
_this_column_is_selected = True
break
table_dictionary = self.parent.table_dictionary
if _this_column_is_selected:
update_selection_flag = True #we change the state so we need to update the selection
for _index in table_dictionary:
table_dictionary[_index]['lock'] = status
_widget_lock = self.ui.value_table.cellWidget(int(_index), 2)
_widget_lock.blockSignals(True)
_widget_lock.setChecked(status)
_widget_lock.blockSignals(False)
if status:
_widget = self.ui.value_table.cellWidget(int(_index), 3)
if _widget.isChecked(): # because we can not be active and locked at the same time
table_dictionary[_index]['active'] = False
_widget.blockSignals(True)
_widget.setChecked(False)
_widget.blockSignals(False)
else:
table_dictionary[str(row_clicked)]['lock'] = status
if status:
_widget = self.ui.value_table.cellWidget(row_clicked, 3)
if _widget.isChecked(): # because we can not be active and locked at the same time
table_dictionary[str(row_clicked)]['active'] = False
_widget.blockSignals(True)
_widget.setChecked(False)
_widget.blockSignals(False)
update_selection_flag = True #we change the state so we need to update the selection
self.parent.table_dictionary = table_dictionary
# hide this row if status is False and user only wants to see locked items
o_filling_handler = FillingTableHandler(parent = self.parent)
if (status == False) and (o_filling_handler.get_row_to_show_state() == 'lock'):
self.parent.fitting_ui.ui.value_table.hideRow(row_clicked)
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bins_locked()
self.update_bragg_edge_plot()
o_bin_handler.update_bins_selected()
if self.parent.advanced_selection_ui:
self.parent.advanced_selection_ui.update_lock_table()
if update_selection_flag:
self.parent.advanced_selection_ui.update_selection_table()
self.parent.advanced_selection_ui.ui.lock_table.blockSignals(False)
def value_table_right_click(self, position):
o_table_handler = ValueTableHandler(parent=self.parent)
o_table_handler.right_click(position=position)
def update_image_view_selection(self):
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bins_selected()
def update_image_view_lock(self):
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bins_locked()
def update_bragg_edge_plot(self, update_selection=True):
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bragg_edge_plot()
if update_selection:
self.bragg_edge_linear_region_changing()
def selection_in_value_table_of_rows_cell_clicked(self, row, column):
# make sure the selection is right (val and err selected at the same time)
if column > 4:
_item0 = self.ui.value_table.item(0, column)
_is_selected = self.ui.value_table.isItemSelected(_item0)
if (column % 2) == 0:
left_column = column - 1
right_column = column
else:
left_column = column
right_column = column + 1
nbr_row = self.ui.value_table.rowCount()
_selection = QtGui.QTableWidgetSelectionRange(0, left_column,
nbr_row-1, right_column)
self.ui.value_table.setRangeSelected(_selection, _is_selected)
self.update_bragg_edge_plot()
def selection_in_value_table_changed(self):
self.selection_in_value_table_of_rows_cell_clicked(-1, -1)
def bragg_edge_linear_region_changing(self):
#current xaxis is
x_axis = self.parent.fitting_bragg_edge_x_axis
_lr = self.parent.fitting_lr
if _lr is None:
return
selection = list(_lr.getRegion())
left_index = find_nearest_index(array = x_axis, value=selection[0])
right_index = find_nearest_index(array = x_axis, value=selection[1])
# display lambda left and right
lambda_array = self.parent.data_metadata['time_spectra']['normalized_lambda'] * 1e10
_lambda_min = lambda_array[left_index]
_lambda_max = lambda_array[right_index]
self.ui.lambda_min_lineEdit.setText("{:4.2f}".format(_lambda_min))
self.ui.lambda_max_lineEdit.setText("{:4.2f}".format(_lambda_max))
def bragg_edge_linear_region_changed(self):
#current xaxis is
x_axis = self.parent.normalized_lambda_bragg_edge_x_axis
_lr = self.parent.fitting_lr
if _lr is None:
return
selection = list(_lr.getRegion())
left_index = find_nearest_index(array = x_axis, value=selection[0])
right_index = find_nearest_index(array = x_axis, value=selection[1])
list_selected = [left_index, right_index]
self.parent.fitting_bragg_edge_linear_selection = list_selected
def check_advanced_table_status(self):
button_status = self.ui.advanced_table_checkBox.isChecked()
self.advanced_table_clicked(button_status)
def advanced_table_clicked(self, status):
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
o_table_handler = FillingTableHandler(parent=self.parent)
o_table_handler.set_mode(advanced_mode = status)
QApplication.restoreOverrideCursor()
def update_table(self):
o_filling_table = FillingTableHandler(parent = self.parent)
self.parent.fitting_ui.ui.value_table.blockSignals(True)
o_filling_table.fill_table()
self.parent.fitting_ui.ui.value_table.blockSignals(False)
def min_or_max_lambda_manually_changed(self):
min_lambda = float(str(self.ui.lambda_min_lineEdit.text()))
max_lambda = float(str(self.ui.lambda_max_lineEdit.text()))
lambda_array = self.parent.data_metadata['time_spectra']['normalized_lambda'] * 1e10
left_index = find_nearest_index(array=lambda_array, value=min_lambda)
right_index = find_nearest_index(array=lambda_array, value=max_lambda)
self.parent.fitting_bragg_edge_linear_selection = [left_index, right_index]
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bragg_edge_plot()
def initialize_all_parameters_button_clicked(self):
o_initialization = FittingInitializationHandler(parent=self.parent)
o_initialization.make_all_active()
o_initialization.run()
def initialize_all_parameters_step2(self):
o_initialization = FittingInitializationHandler(parent=self.parent)
o_initialization.finished_up_initialization()
# activate or not step4 (yes if we were able to initialize correctly all variables)
self.ui.step4_groupBox.setEnabled(o_initialization.all_variables_initialized)
self.update_bragg_edge_plot()
def fit_table_active_cell_checked(self):
pass
def create_fitting_story_checked(self):
o_story = CreateFittingStoryLauncher(parent=self.parent)
def closeEvent(self, event=None):
if self.parent.advanced_selection_ui:
self.parent.advanced_selection_ui.close()
if self.parent.fitting_set_variables_ui:
self.parent.fitting_set_variables_ui.close()
self.parent.fitting_ui = None
| [
"[email protected]"
]
| |
8425cd0230586cba7d321dc4706f57f721a3c5d4 | b246bdb4ae3d845bbf8dee704b8936c32211c0f5 | /Figure_1/initial_subtyping/do_tsne.py | fe059a8bd9f38cb0c2f026356b59d034111066fc | []
| no_license | KnottLab/bladder-snSeq | abfd3d77a04250622e6a28d84878e5adcd335d00 | 2e087dc745046e30c2814ab3e4c295bfa34e6820 | refs/heads/master | 2023-04-07T13:36:44.794889 | 2021-12-08T15:37:45 | 2021-12-08T15:37:45 | 323,445,511 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,966 | py | #!/usr/bin/env python
import numpy as np
import argparse
from load_data import load_data
from MulticoreTSNE import MulticoreTSNE as TSNE
try:
    import cuml
    CUML_FLAG = True
except:
    print('[DO_TSNE] WARNING failed to import cuML. GPU accelerated TSNE will not be available.')
    CUML_FLAG = False

"""
Modules have two modes: standalone from command line and pipelined
Both modes accept a preprocessed AnnData object as input.
Standalone mode writes back a AnnData with new metadata
Pipelined mode returns the AnnData object with new metadata
UMAPs with /umap-learn/cuML GPU-accelerated UMAP implementation
https://umap-learn.readthedocs.io/en/latest/
https://github.com/lmcinnes/umap
"""


def do_tsne(adata, ARGS):
    latent = adata.obsm[ARGS.latent_key]

    if ARGS.gpu and CUML_FLAG:
        print('[DO_TSNE] Using cuML GPU-accelerated TSNE')
        umap_class = cuml.UMAP
        if ARGS.metric != 'euclidean':
            print('[DO_TSNE] cuML TSNE requires euclidean distance metric.')
        emb = cuml.TSNE(
            perplexity=ARGS.perplexity,
            learning_rate=ARGS.learning_rate,
            early_exaggeration=ARGS.early_exaggeration,
        ).fit_transform(latent)
    else:
        print('[DO_TSNE] Using MulticoreTSNE')
        emb = TSNE(perplexity=ARGS.perplexity,
                   metric=ARGS.metric,
                   verbose=False, n_jobs=ARGS.n_jobs).fit_transform(latent)

    print(f'[DO_TSNE] placing embedding {emb.shape} in key {ARGS.tsne_key}')
    adata.obsm[ARGS.tsne_key] = emb

    print(f'[DO_TSNE] recording tSNE args')
    adata.uns['tSNE_args'] = ARGS.__dict__
    return adata


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', type=str)
    parser.add_argument('--latent_key', default='X_scVI_vanilla', type=str,
                        help='Key in adata.obsm to use as features for tsne.')
    parser.add_argument('--tsne_key', default='X_scVI_tsne_vanilla', type=str,
                        help='Key in adata.obsm to save tsne embedding.')
    parser.add_argument('--gpu', action='store_true',
                        help='Whether to use GPU-accelerated tsne via RapidsAI '
                             'and the cuML library.')
    parser.add_argument('-j', '--n_jobs', default=12, type=int,
                        help='Number of jobs for MulticoreTSNE')
    parser.add_argument('--perplexity', default=20, type=int)
    parser.add_argument('--learning_rate', default=200., type=float)
    parser.add_argument('--n_iter', default=1000, type=int)
    parser.add_argument('--metric', default='euclidean', type=str)
    parser.add_argument('--early_exaggeration', default=12, type=float)
    parser.add_argument('--output_adata', default=None, type=str,
                        help='Path to save.')

    ARGS = parser.parse_args()
    adata = load_data(ARGS.dataset)
    adata = do_tsne(adata, ARGS)

    if ARGS.output_adata is not None:
        print(f'[DO_TSNE] Writing to {ARGS.output_adata}')
        adata.write(ARGS.output_adata)
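# Illustrative standalone invocation (not part of the original file; the dataset path and
# output name are placeholders, the flags are those defined by the argparse block above):
#   python do_tsne.py my_dataset --latent_key X_scVI_vanilla --tsne_key X_scVI_tsne_vanilla \
#       --perplexity 20 --output_adata my_dataset_tsne.h5ad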
| [
"[email protected]"
]
| |
14e1a228d0680642f41d17ebeaa1552a75c5e0c5 | 1aa0ddb70fb893a6f958841b0a606cdcac954e18 | /settings/forms/batches.py | a1931328efe9b17262c46d238776331d3278fa66 | []
| no_license | shitalluitel/LibraryManagementSystem | 3042860a70096bf3821299fb10ca35958e680f62 | eecd909b272ad7e524a031c9142d22a356141fda | refs/heads/master | 2023-02-17T06:42:19.044516 | 2021-01-10T14:52:18 | 2021-01-10T14:52:18 | 166,533,846 | 2 | 1 | null | 2023-02-07T22:14:35 | 2019-01-19T10:22:41 | HTML | UTF-8 | Python | false | false | 1,003 | py | from django import forms
from django.forms import ModelMultipleChoiceField
from settings.models import Batch, CourseBatch, Course
class BatchForm(forms.ModelForm):
    class Meta:
        model = Batch
        fields = ['name', 'code']
        widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control'}),
            'code': forms.TextInput(attrs={'class': 'form-control'}),
        }
        labels = {
            'name': 'Batch Name',
            'code': 'Batch Code',
        }


class CourseBatchCreateForm(forms.Form):
    course = forms.ModelMultipleChoiceField(
        queryset=Course.objects.all(),
        label="Choose courses for this batch."
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        #
        # self.fields['course'] = ModelMultipleChoiceField(queryset=Course.objects.all())
        self.fields['course'].widget.attrs['class'] = 'form-control'
        self.fields['course'].empty_label = "Choose a course"
| [
"[email protected]"
]
| |
5f01b00fb146fec0d23c878194633081590499e0 | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res/scripts/client/gui/scaleform/framework/entities/abstract/tooltipmgrmeta.py | a6dfedb52bce9035a795727e60fc365f096a4dbc | []
| no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,329 | py | # 2016.05.01 15:22:59 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/framework/entities/abstract/ToolTipMgrMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIModule import BaseDAAPIModule
class ToolTipMgrMeta(BaseDAAPIModule):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends BaseDAAPIModule
    null
    """

    def onCreateComplexTooltip(self, tooltipId, stateType):
        """
        :param tooltipId:
        :param stateType:
        :return :
        """
        self._printOverrideError('onCreateComplexTooltip')

    def onCreateTypedTooltip(self, type, args, stateType):
        """
        :param type:
        :param args:
        :param stateType:
        :return :
        """
        self._printOverrideError('onCreateTypedTooltip')

    def as_showS(self, tooltipData, linkage):
        """
        :param tooltipData:
        :param linkage:
        :return :
        """
        if self._isDAAPIInited():
            return self.flashObject.as_show(tooltipData, linkage)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\framework\entities\abstract\tooltipmgrmeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:22:59 Střední Evropa (letní čas)
| [
"[email protected]"
]
| |
85d95bdfbd59a153f246c62bca01d14bff2342be | 8382f4ec907950a8cfc618d3cceb97b0d00ab478 | /6kyu/encryptThis.py | 98249bc9ece7063bffc8fcf98db0cc716a54aaba | []
| no_license | naistangz/codewars_challenges | 80788f3869a4283c89ee2a05f19142b18ba4820c | 372bbb6f1668b378183a169206526b52315107a8 | refs/heads/master | 2023-04-14T11:52:31.412554 | 2021-04-25T09:39:03 | 2021-04-25T09:39:03 | 299,615,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | def encrypt_this(text):
    words = text.split(" ")
    res = []
    for i in words:
        new = ""
        temp = ""
        for j in range(len(i)):
            if j == 0:
                new += str(ord(i[j]))
            elif j == 1:
                temp = i[j]
                new += i[-1]
            elif j == len(i) - 1:
                new += temp
            else:
                new += i[j]
        res.append(new)
    return " ".join(list(filter(None, res))) | [
"[email protected]"
]
| |
7c8e4675d0711026385f5328533e7c8eeb8fad4d | 56db1ccba3f8976b2df6d97c99e5aae7108149a1 | /spending/main/admin.py | 2c410651c1a51abbb5f05621793ae519229eae80 | []
| no_license | peterbe/django-spending | 4d60b7a77250fc58eb7a397e388fd22fe73576de | ab2ab1730fbdd999e5ef8d75575795fa3a48d2b9 | refs/heads/master | 2021-01-10T05:32:00.005607 | 2013-07-06T05:41:41 | 2013-07-06T05:41:41 | 8,384,613 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | from django.contrib import admin
from spending.main.models import Household, Expense, Category
class HouseholdAdmin(admin.ModelAdmin):
    list_display = ('name', 'no_users')

    def no_users(self, obj):
        return obj.users.all().count()
    no_users.short_description = '# users'


class ExpenseAdmin(admin.ModelAdmin):
    list_display = ('amount', 'date', 'user', 'category')


class CategoryAdmin(admin.ModelAdmin):
    list_display = ('name',)


admin.site.register(Household, HouseholdAdmin)
admin.site.register(Expense, ExpenseAdmin)
admin.site.register(Category, CategoryAdmin)
| [
"[email protected]"
]
| |
11ef2cc4fb52774a2fb7d480df6720fc9c79afd9 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /02_Statistical_Methods_for_Machine_Learning/14/01_tolerance.py | 4a81741857f5d7f81dd597a2d99ba09c2f2bae3b | []
| no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | # parametric tolerance interval
from numpy import mean
from numpy import sqrt
from numpy.random import randn
from numpy.random import seed
from scipy.stats import chi2
from scipy.stats import norm
# seed the random number generator
seed(1)
# generate dataset
data = 5 * randn(100) + 50
# specify degrees of freedom
n = len(data)
dof = n - 1
# specify data coverage
prop = 0.95
prop_inv = (1.0 - prop) / 2.0
gauss_critical = norm.ppf(prop_inv)
print('Gaussian critical value: %.3f (coverage=%d%%)' % (gauss_critical, prop * 100))
# specify confidence
prob = 0.99
prop_inv = 1.0 - prob
chi_critical = chi2.ppf(prop_inv, dof)
print('Chi-Squared critical value: %.3f (prob=%d%%, dof=%d)' % (chi_critical, prob * 100, dof))
# tolerance
interval = sqrt((dof * (1 + (1 / n)) * gauss_critical ** 2) / chi_critical)
print('Tolerance Interval: %.3f' % interval)
# summarize
data_mean = mean(data)
lower, upper = data_mean - interval, data_mean + interval
print('%.2f to %.2f covers %d%% of data with a confidence of %d%%' % (lower, upper, prop * 100, prob * 100))
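# Interpretation (assuming the data really is Gaussian): the printed interval is
# expected to cover about 95% of the population, and that coverage statement is
# made with 99% confidence (prop=0.95 and prob=0.99 above).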
| [
"[email protected]"
]
| |
4fdc6b0d3c0d6e664d22960a9926a3b2127f2f29 | 753a70bc416e8dced2853f278b08ef60cdb3c768 | /models/research/deeplab/datasets/build_cityscapes_data.py | ce81baef20a460abaa634d3f1dcb6760a0858dec | [
"MIT",
"Apache-2.0"
]
| permissive | finnickniu/tensorflow_object_detection_tflite | ef94158e5350613590641880cb3c1062f7dd0efb | a115d918f6894a69586174653172be0b5d1de952 | refs/heads/master | 2023-04-06T04:59:24.985923 | 2022-09-20T16:29:08 | 2022-09-20T16:29:08 | 230,891,552 | 60 | 19 | MIT | 2023-03-25T00:31:18 | 2019-12-30T09:58:41 | C++ | UTF-8 | Python | false | false | 6,244 | py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts Cityscapes data to TFRecord file format with Example protos.
The Cityscapes dataset is expected to have the following directory structure:
+ cityscapes
  - build_cityscapes_data.py (current working directory).
- build_data.py
+ cityscapesscripts
+ annotation
+ evaluation
+ helpers
+ preparation
+ viewer
+ gtFine
+ train
+ val
+ test
+ leftImg8bit
+ train
+ val
+ test
+ tfrecord
This script converts data into sharded data files and saves them in the tfrecord folder.
Note that before running this script, the users should (1) register at the
Cityscapes dataset website https://www.cityscapes-dataset.com to
download the dataset, and (2) run the script provided by Cityscapes
`preparation/createTrainIdLabelImgs.py` to generate the training groundtruth.
Also note that the tensorflow model will be trained with `TrainId' instead
of `EvalId' used on the evaluation server. Thus, the users need to convert
the predicted labels to `EvalId` for evaluation on the server. See the
vis.py for more details.
The Example proto contains the following fields:
image/encoded: encoded image content.
image/filename: image filename.
image/format: image file format.
image/height: image height.
image/width: image width.
image/channels: image channels.
image/segmentation/class/encoded: encoded semantic segmentation content.
image/segmentation/class/format: semantic segmentation file format.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import math
import os.path
import re
import sys
import build_data
from six.moves import range
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('cityscapes_root',
'./cityscapes',
'Cityscapes dataset root folder.')
tf.app.flags.DEFINE_string(
'output_dir',
'./tfrecord',
'Path to save converted SSTable of TensorFlow examples.')
_NUM_SHARDS = 10
# A map from data type to folder name that saves the data.
_FOLDERS_MAP = {
'image': 'leftImg8bit',
'label': 'gtFine',
}
# A map from data type to filename postfix.
_POSTFIX_MAP = {
'image': '_leftImg8bit',
'label': '_gtFine_labelTrainIds',
}
# A map from data type to data format.
_DATA_FORMAT_MAP = {
'image': 'png',
'label': 'png',
}
# Image file pattern.
_IMAGE_FILENAME_RE = re.compile('(.+)' + _POSTFIX_MAP['image'])
def _get_files(data, dataset_split):
"""Gets files for the specified data type and dataset split.
Args:
data: String, desired data ('image' or 'label').
dataset_split: String, dataset split ('train', 'val', 'test')
Returns:
A list of sorted file names or None when getting label for
test set.
"""
if data == 'label' and dataset_split == 'test':
return None
pattern = '*%s.%s' % (_POSTFIX_MAP[data], _DATA_FORMAT_MAP[data])
search_files = os.path.join(
FLAGS.cityscapes_root, _FOLDERS_MAP[data], dataset_split, '*', pattern)
filenames = glob.glob(search_files)
return sorted(filenames)
def _convert_dataset(dataset_split):
"""Converts the specified dataset split to TFRecord format.
Args:
dataset_split: The dataset split (e.g., train, val).
Raises:
RuntimeError: If loaded image and label have different shape, or if the
image file with specified postfix could not be found.
"""
image_files = _get_files('image', dataset_split)
label_files = _get_files('label', dataset_split)
num_images = len(image_files)
num_per_shard = int(math.ceil(num_images / _NUM_SHARDS))
image_reader = build_data.ImageReader('png', channels=3)
label_reader = build_data.ImageReader('png', channels=1)
for shard_id in range(_NUM_SHARDS):
shard_filename = '%s-%05d-of-%05d.tfrecord' % (
dataset_split, shard_id, _NUM_SHARDS)
output_filename = os.path.join(FLAGS.output_dir, shard_filename)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_idx = shard_id * num_per_shard
end_idx = min((shard_id + 1) * num_per_shard, num_images)
for i in range(start_idx, end_idx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i + 1, num_images, shard_id))
sys.stdout.flush()
# Read the image.
image_data = tf.gfile.FastGFile(image_files[i], 'rb').read()
height, width = image_reader.read_image_dims(image_data)
# Read the semantic segmentation annotation.
seg_data = tf.gfile.FastGFile(label_files[i], 'rb').read()
seg_height, seg_width = label_reader.read_image_dims(seg_data)
if height != seg_height or width != seg_width:
raise RuntimeError('Shape mismatched between image and label.')
# Convert to tf example.
re_match = _IMAGE_FILENAME_RE.search(image_files[i])
if re_match is None:
raise RuntimeError('Invalid image filename: ' + image_files[i])
filename = os.path.basename(re_match.group(1))
example = build_data.image_seg_to_tfexample(
image_data, filename, height, width, seg_data)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def main(unused_argv):
# Only support converting 'train' and 'val' sets for now.
for dataset_split in ['train', 'val']:
_convert_dataset(dataset_split)
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
]
| |
36427016924bc734286ed9ff39b3812b2d38b21a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2367/60699/251530.py | e61d0d3cb0da03640cd9f10d895c7a604b12880b | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | res1=1
list1=[1]
for i in range(0,30):
res1=res1*10+1
list1.append(res1)
n=int(input())
# print the smallest repunit (1, 11, 111, ...) divisible by n, or -1 if none is found
for i in list1:
    if i % n == 0:
        print(i)
        break
else:
    print(-1) | [
"[email protected]"
]
| |
6c1288b99b5652fc745dbe1c2daa5fa84a0b459f | cd91dd22b391968e077fd0a693813893543cdf1f | /src/opserver/consistent_schdlr.py | d4dee2598c34380e1ab5750983d488d72b64a7fd | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
]
| permissive | forsakening/controller | 6c89476901d78423721b4b444d54123630eaa300 | 408892746ea189aecae33ce3b5a8ac6c1200a8d8 | refs/heads/master | 2020-04-08T12:08:27.204902 | 2019-02-13T08:56:03 | 2019-02-13T08:56:03 | 159,334,442 | 0 | 1 | Apache-2.0 | 2019-01-31T01:44:29 | 2018-11-27T12:48:27 | C++ | UTF-8 | Python | false | false | 10,996 | py | #
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
from consistent_hash import ConsistentHash
import gevent
import os
import sys
import hashlib
import logging
from kazoo.client import KazooClient
from kazoo.client import KazooState
from kazoo.handlers.gevent import SequentialGeventHandler
from random import randint
import struct
import traceback
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus, \
ConnectionType
class ConsistentScheduler(object):
'''
    LibPartitionHelper abstracts out workers and work_items, and their
    mapping to partitions, so the application only has to deal with the work
    items it owns, without bothering about partition mapping.
    This class also provides synchronization primitives to ensure apps
    clean up before giving up their partitions
'''
_MAX_WAIT_4_ALLOCATION = 6 + randint(0, 9)
def __init__(self, service_name=None, zookeeper='127.0.0.1:2181',
delete_hndlr=None, add_hndlr=None, bucketsize=47,
item2part_func=None, partitioner=None, logger=None,
cluster_id=''):
if logger:
self._logger = logger
else:
self._logger = logging.getLogger(__name__)
self._service_name = service_name or os.path.basename(sys.argv[0])
self._item2part_func = item2part_func or self._device2partition
self._zookeeper_srvr = zookeeper
self._zk = None
self._bucketsize = bucketsize
self._delete_hndlr = delete_hndlr
self._add_hndlr = add_hndlr
self._partitioner = partitioner or self._partitioner_func
self._partitions = {}
self._con_hash = None
self._last_log = ''
self._last_log_cnt = 0
self._partition_set = map(str, range(self._bucketsize))
self._cluster_id = cluster_id
if self._cluster_id:
self._zk_path = '/'+self._cluster_id + '/contrail_cs' + '/'+self._service_name
else:
self._zk_path = '/'.join(['/contrail_cs', self._service_name])
self._conn_state = None
self._sandesh_connection_info_update(status='INIT', message='')
while True:
self._logger.error("Consistent scheduler zk start")
self._zk = KazooClient(self._zookeeper_srvr,
handler=SequentialGeventHandler())
self._zk.add_listener(self._zk_lstnr)
try:
self._zk.start()
while self._conn_state != ConnectionStatus.UP:
gevent.sleep(1)
break
except Exception as e:
# Update connection info
self._sandesh_connection_info_update(status='DOWN',
message=str(e))
self._zk.remove_listener(self._zk_lstnr)
try:
self._zk.stop()
self._zk.close()
except Exception as ex:
template = "Exception {0} in Consistent scheduler zk stop/close. Args:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s for %s" % \
(messag, traceback.format_exc(), self._service_name))
finally:
self._zk = None
gevent.sleep(1)
self._pc = self._zk.SetPartitioner(path=self._zk_path,
set=self._partition_set,
partition_func=self._partitioner)
self._wait_allocation = 0
gevent.sleep(0)
def _sandesh_connection_info_update(self, status, message):
new_conn_state = getattr(ConnectionStatus, status)
ConnectionState.update(conn_type = ConnectionType.ZOOKEEPER,
name = 'Zookeeper', status = new_conn_state,
message = message,
server_addrs = self._zookeeper_srvr.split(','))
if ((self._conn_state and self._conn_state != ConnectionStatus.DOWN) and
new_conn_state == ConnectionStatus.DOWN):
msg = 'Connection to Zookeeper down: %s' %(message)
self._supress_log(msg)
if (self._conn_state and self._conn_state != new_conn_state and
new_conn_state == ConnectionStatus.UP):
msg = 'Connection to Zookeeper ESTABLISHED'
self._supress_log(msg)
self._conn_state = new_conn_state
# end _sandesh_connection_info_update
def _zk_lstnr(self, state):
self._logger.error("Consistent scheduler listen %s" % str(state))
if state == KazooState.CONNECTED:
# Update connection info
self._sandesh_connection_info_update(status='UP', message='')
elif state == KazooState.LOST:
self._logger.error("Consistent scheduler connection LOST")
# Lost the session with ZooKeeper Server
# Best of option we have is to exit the process and restart all
# over again
self._sandesh_connection_info_update(status='DOWN',
message='Connection to Zookeeper lost')
os._exit(2)
elif state == KazooState.SUSPENDED:
self._logger.error("Consistent scheduler connection SUSPENDED")
# Update connection info
self._sandesh_connection_info_update(status='INIT',
message = 'Connection to zookeeper lost. Retrying')
def schedule(self, items, lock_timeout=30):
gevent.sleep(0)
ret = False
if self._pc.failed:
self._logger.error('Lost or unable to acquire partition')
os._exit(2)
elif self._pc.release:
self._supress_log('Releasing...')
self._release()
elif self._pc.allocating:
self._supress_log('Waiting for allocation...')
self._pc.wait_for_acquire(lock_timeout)
if self._wait_allocation < self._MAX_WAIT_4_ALLOCATION:
self._wait_allocation += 1
else:
self._logger.error('Giving up after %d tries!' %
(self._wait_allocation))
os._exit(2)
elif self._pc.acquired:
self._supress_log('got work: ', list(self._pc))
ret = True
self._wait_allocation = 0
self._populate_work_items(items)
self._supress_log('work items: ',
self._items2name(self.work_items()),
'from the list',
self._items2name(items))
return ret
def members(self):
return list(self._con_hash.nodes)
def partitions(self):
return list(self._pc)
def work_items(self):
return sum(self._partitions.values(), [])
def finish(self):
self._inform_delete(self._partitions.keys())
self._pc.finish()
self._zk.remove_listener(self._zk_lstnr)
gevent.sleep(1)
try:
self._zk.stop()
except:
self._logger.error("Stopping kazooclient failed")
else:
self._logger.error("Stopping kazooclient successful")
try:
self._zk.close()
except:
self._logger.error("Closing kazooclient failed")
else:
self._logger.error("Closing kazooclient successful")
def _items2name(self, items):
return map(lambda x: x.name, items)
def _supress_log(self, *s):
slog = ' '.join(map(str, s))
dl = ''
        if slog != self._last_log:
if self._last_log_cnt:
dl += ' ' * 4
dl += '.' * 8
dl += '[last print repeats %d times]' % self._last_log_cnt
self._last_log_cnt = 0
dl += slog
self._last_log = slog
self._logger.debug(dl)
else:
self._last_log_cnt += 1
def _consistent_hash(self, members):
if self._con_hash is None:
self._con_hash = ConsistentHash(members)
self._logger.error('members: %s' % (str(self._con_hash.nodes)))
cur, updtd = set(self._con_hash.nodes), set(members)
if cur != updtd:
newm = updtd - cur
rmvd = cur - updtd
if newm:
self._logger.error('new members: %s' % (str(newm)))
self._con_hash.add_nodes(list(newm))
if rmvd:
self._logger.error('members left: %s' % (str(rmvd)))
self._con_hash.del_nodes(list(rmvd))
return self._con_hash
def _consistent_hash_get_node(self, members, partition):
return self._consistent_hash(members).get_node(partition)
def _partitioner_func(self, identifier, members, _partitions):
partitions = [p for p in _partitions \
if self._consistent_hash_get_node(members, p) == identifier]
self._logger.error('partitions: %s' % (str(partitions)))
return partitions
def _release(self):
old = set(self._pc)
new = set(self._partitioner(self._pc._identifier,
list(self._pc._party),
self._partition_set))
rmvd = old - new
added = new - old
if rmvd:
self._inform_delete(list(rmvd))
if added:
self._inform_will_add(list(added))
self._pc.release_set()
def _list_items_in(self, partitions):
return sum([self._partitions[k] for k in partitions if k in \
self._partitions], [])
def _inform_will_add(self, partitions):
if callable(self._add_hndlr):
self._add_hndlr(self._list_items_in(partitions))
def _inform_delete(self, partitions):
if callable(self._delete_hndlr):
self._delete_hndlr(self._list_items_in(partitions))
def _populate_work_items(self, items):
self._refresh_work_items()
for i in items:
part = str(self._item2part_func(i.name))
if part in list(self._pc):
if part not in self._partitions:
self._partitions[part] = []
if i.name not in map(lambda x: x.name,
self._partitions[part]):
self._partitions[part].append(i)
self._logger.debug('@populate_work_items(%s): done!' % ' '.join(
map(lambda v: str(v[0]) + ':' + ','.join(map(
lambda x: x.name, v[1])), self._partitions.items())))
gevent.sleep(0)
def _device2partition(self, key):
return struct.unpack('Q', hashlib.md5(key).digest(
)[-8:])[0] % self._bucketsize
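    # Example: _device2partition('some-device') takes the last 8 bytes of the
    # md5 digest as an unsigned 64-bit integer modulo `bucketsize`, so the same
    # key deterministically maps to the same partition on every node.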
def _refresh_work_items(self):
for k in self._partitions:
self._partitions[k] = []
| [
"[email protected]"
]
| |
47e5b33bf2c46dffa3df76a2bf4134619041815a | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Reconstruction/RecExample/RecExCommon/share/ContainerRemapping.py | 00f5c31d568f606d42fba403b8779c3df62a656f | []
| no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,782 | py | include.block ("RecExCommon/ContainerRemapping.py")
from AthenaCommon.AppMgr import ServiceMgr
# Instantiate the address remapping service:
if not hasattr( ServiceMgr, "AddressRemappingSvc" ):
ServiceMgr += CfgMgr.AddressRemappingSvc()
pass
if not hasattr( ServiceMgr, "ProxyProviderSvc" ):
ServiceMgr += CfgMgr.ProxyProviderSvc()
pass
ServiceMgr.ProxyProviderSvc.ProviderNames += [ "AddressRemappingSvc" ]
# Declare the name conversion rules:
ServiceMgr.AddressRemappingSvc.TypeKeyOverwriteMaps += [
"xAOD::ElectronContainer#ElectronCollection->"
"xAOD::ElectronContainer#Electrons",
"xAOD::ElectronAuxContainer#ElectronCollectionAux.->"
"xAOD::ElectronAuxContainer#ElectronsAux.",
"xAOD::ElectronContainer#FwdElectrons->"
"xAOD::ElectronContainer#ForwardElectrons",
"xAOD::ElectronAuxContainer#FwdElectronsAux.->"
"xAOD::ElectronAuxContainer#ForwardElectronsAux.",
"xAOD::PhotonContainer#PhotonCollection->"
"xAOD::PhotonContainer#Photons",
"xAOD::PhotonAuxContainer#PhotonCollectionAux.->"
"xAOD::PhotonAuxContainer#PhotonsAux.",
"xAOD::CaloClusterContainer#egClusterCollection->"
"xAOD::CaloClusterContainer#egammaClusters",
"xAOD::CaloClusterAuxContainer#egClusterCollectionAux.->"
"xAOD::CaloClusterAuxContainer#egammaClustersAux.",
"xAOD::CaloClusterContainer#LArClusterEMFrwd->"
"xAOD::CaloClusterContainer#ForwardElectronClusters",
"xAOD::CaloClusterAuxContainer#LArClusterEMFrwdAux.->"
"xAOD::CaloClusterAuxContainer#ForwardElectronClustersAux.",
"xAOD::TrackParticleContainer#InDetTrackParticlesForward->"
"xAOD::TrackParticleContainer#InDetForwardTrackParticles",
"xAOD::TrackParticleAuxContainer#InDetTrackParticlesForwardAux.->"
"xAOD::TrackParticleAuxContainer#InDetForwardTrackParticlesAux.",
"xAOD::TrackParticleContainer#InDetTrackParticlesLowBeta->"
"xAOD::TrackParticleContainer#InDetLowBetaTrackParticles",
"xAOD::TrackParticleAuxContainer#InDetTrackParticlesLowBetaAux.->"
"xAOD::TrackParticleAuxContainer#InDetLowBetaTrackParticlesAux.",
"xAOD::TauJetContainer#TauRecContainer->"
"xAOD::TauJetContainer#TauJets",
"xAOD::TauJetAuxContainer#TauRecContainerAux.->"
"xAOD::TauJetAuxContainer#TauJetsAux.",
"xAOD::CaloClusterContainer#TauPi0ClusterContainer->"
"xAOD::CaloClusterContainer#TauPi0Clusters",
"xAOD::CaloClusterAuxContainer#TauPi0ClusterContainerAux.->"
"xAOD::CaloClusterAuxContainer#TauPi0ClustersAux.",
"xAOD::VertexContainer#TauSecondaryVertexContainer->"
"xAOD::VertexContainer#TauSecondaryVertices",
"xAOD::VertexAuxContainer#TauSecondaryVertexContainerAux.->"
"xAOD::VertexAuxContainer#TauSecondaryVerticesAux.",
"xAOD::PFOContainer#TauShotPFOContainer->"
"xAOD::PFOContainer#TauShotParticleFlowObjects",
"xAOD::PFOAuxContainer#TauShotPFOContainerAux.->"
"xAOD::PFOAuxContainer#TauShotParticleFlowObjectsAux.",
"xAOD::PFOContainer#TauPi0ChargedPFOContainer->"
"xAOD::PFOContainer#TauChargedParticleFlowObjects",
"xAOD::PFOAuxContainer#TauPi0ChargedPFOContainerAux.->"
"xAOD::PFOAuxContainer#TauChargedParticleFlowObjectsAux.",
"xAOD::PFOContainer#TauPi0NeutralPFOContainer->"
"xAOD::PFOContainer#TauNeutralParticleFlowObjects",
"xAOD::PFOAuxContainer#TauPi0NeutralPFOContainerAux.->"
"xAOD::PFOAuxContainer#TauNeutralParticleFlowObjectsAux.",
"xAOD::PFOContainer#chargedJetETMissPFO_eflowRec->"
"xAOD::PFOContainer#JetETMissChargedParticleFlowObjects",
"xAOD::PFOAuxContainer#chargedJetETMissPFO_eflowRecAux.->"
"xAOD::PFOAuxContainer#JetETMissChargedParticleFlowObjectsAux.",
"xAOD::PFOContainer#neutralJetETMissPFO_eflowRec->"
"xAOD::PFOContainer#JetETMissNeutralParticleFlowObjects",
"xAOD::PFOAuxContainer#neutralJetETMissPFO_eflowRecAux.->"
"xAOD::PFOAuxContainer#JetETMissNeutralParticleFlowObjectsAux.",
"xAOD::CaloClusterContainer#CaloCalTopoCluster->"
"xAOD::CaloClusterContainer#CaloCalTopoClusters",
"xAOD::CaloClusterAuxContainer#CaloCalTopoClusterAux.->"
"xAOD::CaloClusterAuxContainer#CaloCalTopoClustersAux.",
"xAOD::TruthEventContainer#TruthEvent->"
"xAOD::TruthEventContainer#TruthEvents",
"xAOD::TruthEventAuxContainer#TruthEventAux.->"
"xAOD::TruthEventAuxContainer#TruthEventsAux.",
"xAOD::TruthParticleContainer#TruthParticle->"
"xAOD::TruthParticleContainer#TruthParticles",
"xAOD::TruthParticleAuxContainer#TruthParticleAux.->"
"xAOD::TruthParticleAuxContainer#TruthParticlesAux.",
"xAOD::TruthVertexContainer#TruthVertex->"
"xAOD::TruthVertexContainer#TruthVertices",
"xAOD::TruthVertexAuxContainer#TruthVertexAux.->"
"xAOD::TruthVertexAuxContainer#TruthVerticesAux."
]
| [
"[email protected]"
]
| |
70b13e09918671ec8f42febe6f91674c2a84f798 | d4f2e2e3552ab4b111f78cfbad0d30c144201093 | /2016-12-20/semaphore.py | 2c1f4492110d89b3a3f1daa84001456b57596e8d | [
"Apache-2.0"
]
| permissive | dongweiming/mp | c1e9f6f2c1fd8adbd4d7b8ffc45c5cc288cdcd80 | 129c31c818e1f0c39c983aad1f2f1ad9fa7efb1c | refs/heads/master | 2023-04-29T07:56:27.198574 | 2022-10-30T04:20:09 | 2022-10-30T04:21:27 | 75,051,758 | 96 | 35 | Apache-2.0 | 2023-04-17T17:34:17 | 2016-11-29T06:44:53 | Python | UTF-8 | Python | false | false | 509 | py | import aiohttp
import asyncio
NUMBERS = range(12)
URL = 'http://httpbin.org/get?a={}'
sema = asyncio.Semaphore(3)
async def fetch_async(a):
async with aiohttp.request('GET', URL.format(a)) as r:
data = await r.json()
return data['args']['a']
async def print_result(a):
    async with sema:
r = await fetch_async(a)
print('fetch({}) = {}'.format(a, r))
loop = asyncio.get_event_loop()
f = asyncio.wait([print_result(num) for num in NUMBERS])
loop.run_until_complete(f)
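# Note: Semaphore(3) caps the number of in-flight requests at three; the other
# coroutines wait inside print_result until a slot is released.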
| [
"[email protected]"
]
| |
3d2cdb0df6994ed18122a7d3e04ebebc15aee7da | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnuiy.py | d0fcd3f412294aeefbace25ce9b8a1fa5cf588aa | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 44 | py | ii = [('MedwTAI2.py', 1), ('MereHHB.py', 1)] | [
"[email protected]"
]
| |
b097b7a2e91b91ea67969ca245e6a9c69ad4bc7f | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Selenium_Chromium/source/selenium/webdriver/edge/service.py | 9eac51171035f1d2bd648ca409aeee7b8c69b782 | [
"MIT"
]
| permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 2,161 | py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import service
class Service(service.Service):
def __init__(self, executable_path, port=0, verbose=False, log_path=None):
"""
Creates a new instance of the EdgeDriver service.
EdgeDriver provides an interface for Microsoft WebDriver to use
with Microsoft Edge.
:param executable_path: Path to the Microsoft WebDriver binary.
:param port: Run the remote service on a specified port.
Defaults to 0, which binds to a random open port of the
system's choosing.
:verbose: Whether to make the webdriver more verbose (passes the
--verbose option to the binary). Defaults to False.
:param log_path: Optional path for the webdriver binary to log to.
Defaults to None which disables logging.
"""
self.service_args = []
if verbose:
self.service_args.append("--verbose")
params = {
"executable": executable_path,
"port": port,
"start_error_message": "Please download from http://go.microsoft.com/fwlink/?LinkId=619687"
}
if log_path:
params["log_file"] = open(log_path, "a+")
service.Service.__init__(self, **params)
def command_line_args(self):
return ["--port=%d" % self.port] + self.service_args
| [
"[email protected]"
]
| |
fe0e6cff95e5d8a330eff9257815093428fb3c63 | 43ab33b2f50e47f5dbe322daa03c86a99e5ee77c | /test/test_od_mcomplex_type_definition_range_check.py | 1654fc81fa59cb3e96dcfdc2ece04a4d325049a1 | []
| no_license | Sage-Bionetworks/rcc-client | c770432de2d2950e00f7c7bd2bac22f3a81c2061 | 57c4a621aecd3a2f3f9faaa94f53b2727992a01a | refs/heads/main | 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | # coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import rcc
from rcc.models.od_mcomplex_type_definition_range_check import ODMcomplexTypeDefinitionRangeCheck # noqa: E501
from rcc.rest import ApiException
class TestODMcomplexTypeDefinitionRangeCheck(unittest.TestCase):
"""ODMcomplexTypeDefinitionRangeCheck unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ODMcomplexTypeDefinitionRangeCheck
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = rcc.models.od_mcomplex_type_definition_range_check.ODMcomplexTypeDefinitionRangeCheck() # noqa: E501
if include_optional :
return ODMcomplexTypeDefinitionRangeCheck(
check_value = [
rcc.models.od_mcomplex_type_definition_check_value.ODMcomplexTypeDefinitionCheckValue(
value = '0', )
],
formal_expression = [
rcc.models.od_mcomplex_type_definition_formal_expression.ODMcomplexTypeDefinitionFormalExpression(
value = '0',
context = '0', )
],
measurement_unit_ref = rcc.models.od_mcomplex_type_definition_measurement_unit_ref.ODMcomplexTypeDefinitionMeasurementUnitRef(
measurement_unit_oid = '0', ),
error_message = rcc.models.od_mcomplex_type_definition_error_message.ODMcomplexTypeDefinitionErrorMessage(
translated_text = [
rcc.models.od_mcomplex_type_definition_translated_text.ODMcomplexTypeDefinitionTranslatedText(
value = '0',
lang = '0', )
], ),
comparator = 'LT',
soft_hard = 'SOFT'
)
else :
return ODMcomplexTypeDefinitionRangeCheck(
)
def testODMcomplexTypeDefinitionRangeCheck(self):
"""Test ODMcomplexTypeDefinitionRangeCheck"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
d273be6ad84e00816624c1c2db81aeb55764ad1f | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/python/ops/metrics_impl.py | 07a43d2961aef78c6af194c06eb1cd62d641f352 | [
"Apache-2.0"
]
| permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144,255 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.metrics module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
def _local_variable(initial_value, validate_shape=True, name=None):
"""Create variable and add it to `GraphKeys.LOCAL_VARIABLES` collection.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape, name=name)
def _remove_squeezable_dimensions(predictions, labels, weights):
"""Internal version of `remove_squeezable_dimensions` which handles weights.
Squeezes `predictions` and `labels` if their rank differs by 1.
Squeezes `weights` if its rank is 1 more than the new rank of `predictions`
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Optional label `Tensor` whose dimensions match `predictions`.
weights: Optional weight `Tensor`. It will be squeezed if its rank is 1
more than the new rank of `predictions`
Returns:
Tuple of `predictions`, `labels` and `weights`, possibly with the last
dimension squeezed.
"""
predictions = ops.convert_to_tensor(predictions)
if labels is not None:
labels, predictions = confusion_matrix.remove_squeezable_dimensions(
labels, predictions)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if weights is None:
return predictions, labels, None
weights = ops.convert_to_tensor(weights)
weights_shape = weights.get_shape()
weights_rank = weights_shape.ndims
if weights_rank == 0:
return predictions, labels, weights
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
if (predictions_rank is not None) and (weights_rank is not None):
# Use static rank.
if weights_rank - predictions_rank == 1:
weights = array_ops.squeeze(weights, [-1])
elif predictions_rank - weights_rank == 1:
weights = array_ops.expand_dims(weights, [-1])
else:
# Use dynamic rank.
weights_rank_tensor = array_ops.rank(weights)
rank_diff = weights_rank_tensor - array_ops.rank(predictions)
def _maybe_expand_weights():
return control_flow_ops.cond(
math_ops.equal(rank_diff, -1),
lambda: array_ops.expand_dims(weights, [-1]),
lambda: weights)
# Don't attempt squeeze if it will fail based on static check.
if ((weights_rank is not None) and
(not weights_shape.dims[-1].is_compatible_with(1))):
maybe_squeeze_weights = lambda: weights
else:
maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])
def _maybe_adjust_weights():
return control_flow_ops.cond(
math_ops.equal(rank_diff, 1),
maybe_squeeze_weights,
_maybe_expand_weights)
# If weights are scalar, do nothing. Otherwise, try to add or remove a
# dimension to match predictions.
weights = control_flow_ops.cond(
math_ops.equal(weights_rank_tensor, 0),
lambda: weights, _maybe_adjust_weights)
return predictions, labels, weights
def _maybe_expand_labels(labels, predictions):
"""If necessary, expand `labels` along last dimension to match `predictions`.
Args:
labels: `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN]. The latter implies
num_labels=1, in which case the result is an expanded `labels` with shape
[D1, ... DN, 1].
predictions: `Tensor` with shape [D1, ... DN, num_classes].
Returns:
`labels` with the same rank as `predictions`.
Raises:
ValueError: if `labels` has invalid shape.
"""
with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
# If sparse, expand sparse shape.
if isinstance(labels, sparse_tensor.SparseTensor):
return control_flow_ops.cond(
math_ops.equal(
array_ops.rank(predictions),
array_ops.size(labels.dense_shape) + 1),
lambda: sparse_ops.sparse_reshape( # pylint: disable=g-long-lambda
labels,
shape=array_ops.concat((labels.dense_shape, (1,)), 0),
name=scope),
lambda: labels)
# Otherwise, try to use static shape.
labels_rank = labels.get_shape().ndims
if labels_rank is not None:
predictions_rank = predictions.get_shape().ndims
if predictions_rank is not None:
if predictions_rank == labels_rank:
return labels
if predictions_rank == labels_rank + 1:
return array_ops.expand_dims(labels, -1, name=scope)
raise ValueError(
'Unexpected labels shape %s for predictions shape %s.' % (
labels.get_shape(), predictions.get_shape()))
# Otherwise, use dynamic shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(predictions), array_ops.rank(labels) + 1),
lambda: array_ops.expand_dims(labels, -1, name=scope),
lambda: labels)
def _create_local(name, shape, collections=None, validate_shape=True,
dtype=dtypes.float32):
"""Creates a new local variable.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
collections: A list of collection names to which the Variable will be added.
validate_shape: Whether to validate the shape of the variable.
dtype: Data type of the variables.
Returns:
The created variable.
"""
# Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES
collections = list(collections or [])
collections += [ops.GraphKeys.LOCAL_VARIABLES]
return variable_scope.variable(
lambda: array_ops.zeros(shape, dtype=dtype),
name=name,
trainable=False,
collections=collections,
validate_shape=validate_shape)
def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.truediv(numerator, denominator),
0,
name=name)
def _safe_scalar_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is 0.
Args:
numerator: A scalar `float64` `Tensor`.
denominator: A scalar `float64` `Tensor`.
name: Name for the returned op.
Returns:
0 if `denominator` == 0, else `numerator` / `denominator`
"""
numerator.get_shape().with_rank_at_most(1)
denominator.get_shape().with_rank_at_most(1)
return control_flow_ops.cond(
math_ops.equal(
array_ops.constant(0.0, dtype=dtypes.float64), denominator),
lambda: array_ops.constant(0.0, dtype=dtypes.float64),
lambda: math_ops.div(numerator, denominator),
name=name)
def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):
"""Calculate a streaming confusion matrix.
Calculates a confusion matrix. For estimation over a stream of data,
the function creates an `update_op` operation.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
Returns:
total_cm: A `Tensor` representing the confusion matrix.
update_op: An operation that increments the confusion matrix.
"""
# Local variable to accumulate the predictions in the confusion matrix.
total_cm = _create_local(
'total_confusion_matrix',
shape=[num_classes, num_classes],
dtype=dtypes.float64)
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
num_classes = math_ops.to_int64(num_classes)
# Flatten the input if its rank > 1.
if predictions.get_shape().ndims > 1:
predictions = array_ops.reshape(predictions, [-1])
if labels.get_shape().ndims > 1:
labels = array_ops.reshape(labels, [-1])
if (weights is not None) and (weights.get_shape().ndims > 1):
weights = array_ops.reshape(weights, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix.confusion_matrix(
labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)
update_op = state_ops.assign_add(total_cm, current_cm)
return total_cm, update_op
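# Example: with num_classes=3 the accumulated `total_cm` is a 3x3 matrix whose
# entry [i, j] counts (weighted) examples with true label i predicted as label j.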
def mean(values, weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes the (weighted) mean of the given values.
The `mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = _create_local('total', shape=[])
count = _create_local('count', shape=[])
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
values, _, weights = _remove_squeezable_dimensions(
predictions=values, labels=None, weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
values = math_ops.multiply(values, weights)
num_values = math_ops.reduce_sum(weights)
update_total_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count, num_values)
mean_t = _safe_div(total, count, 'value')
update_op = _safe_div(update_total_op, update_count_op, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_t)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_t, update_op
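# Example usage (a minimal TF1-style sketch; `values_ph`, `batches` and `sess`
# are illustrative names, not part of this module):
#   mean_t, update_op = tf.metrics.mean(values_ph)
#   sess.run(tf.local_variables_initializer())  # creates `total` and `count`
#   for batch in batches:
#     sess.run(update_op, feed_dict={values_ph: batch})
#   result = sess.run(mean_t)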
def accuracy(labels, predictions, weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Calculates how often `predictions` matches `labels`.
The `accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
predictions: The predicted values, a `Tensor` of any shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if labels.dtype != predictions.dtype:
predictions = math_ops.cast(predictions, labels.dtype)
is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
return mean(is_correct, weights, metrics_collections,
updates_collections, name or 'accuracy')
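# Note: accuracy is simply the weighted mean of the indicator
# tf.equal(predictions, labels); weights of 0 exclude those examples from both
# `total` and `count`.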
def _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=None, includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
`true_positive[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.to_float(predictions),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(array_ops.reshape(
weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_p = _create_local('true_positives', shape=[num_thresholds])
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(
true_p, math_ops.reduce_sum(is_true_positive, 1))
values['tp'] = true_p
if 'fn' in includes:
false_n = _create_local('false_negatives', shape=[num_thresholds])
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(
false_n, math_ops.reduce_sum(is_false_negative, 1))
values['fn'] = false_n
if 'tn' in includes:
true_n = _create_local('true_negatives', shape=[num_thresholds])
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(
true_n, math_ops.reduce_sum(is_true_negative, 1))
values['tn'] = true_n
if 'fp' in includes:
false_p = _create_local('false_positives', shape=[num_thresholds])
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(
false_p, math_ops.reduce_sum(is_false_positive, 1))
values['fp'] = false_p
return values, update_ops
def auc(labels, predictions, weights=None, num_thresholds=200,
metrics_collections=None, updates_collections=None,
curve='ROC', name=None, summation_method='trapezoidal'):
"""Computes the approximate AUC via a Riemann sum.
The `auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
values by the false positive rate, while the area under the PR-curve is the
computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case. Setting `summation_method`
to 'minoring' or 'majoring' can help quantify the error in the approximation
by providing lower or upper bound estimate of the AUC.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
summation_method: Specifies the Riemann summation method used, 'trapezoidal'
[default] that applies the trapezoidal rule, 'minoring' that applies
left summation for increasing intervals and right summation for decreasing
intervals or 'majoring' that applies the opposite.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'auc', (labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' %
(curve))
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_auc(tp, fn, tn, fp, name):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
x = fp_rate
y = rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
x = rec
y = prec
if summation_method == 'trapezoidal':
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
(y[:num_thresholds - 1] + y[1:]) / 2.),
name=name)
elif summation_method == 'minoring':
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
math_ops.minimum(y[:num_thresholds - 1], y[1:])),
name=name)
elif summation_method == 'majoring':
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
math_ops.maximum(y[:num_thresholds - 1], y[1:])),
name=name)
else:
raise ValueError('Invalid summation_method: %s' % summation_method)
# sum up the areas of all the trapeziums
auc_value = compute_auc(
values['tp'], values['fn'], values['tn'], values['fp'], 'value')
update_op = compute_auc(
update_ops['tp'], update_ops['fn'], update_ops['tn'], update_ops['fp'],
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, auc_value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return auc_value, update_op
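# Example usage (sketch; see the docstring above for parameter semantics):
#   auc_t, update_op = tf.metrics.auc(labels, predictions, num_thresholds=500,
#                                     curve='PR', summation_method='minoring')
# A larger `num_thresholds` tightens the Riemann-sum approximation, and
# 'minoring'/'majoring' give lower/upper bound estimates of the true area.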
def mean_absolute_error(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
sum of `weights`
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
absolute_errors = math_ops.abs(predictions - labels)
return mean(absolute_errors, weights, metrics_collections,
updates_collections, name or 'mean_absolute_error')
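# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `mean_absolute_error` follows the same update-then-read pattern as the
# `auc` sketch above; weights of 0 mask entries out of the average.
#
#   mae, mae_update = tf.metrics.mean_absolute_error(
#       labels=tf.constant([1., 2., 3., 4.]),
#       predictions=tf.constant([1.5, 2., 2., 4.]),
#       weights=tf.constant([1., 1., 1., 0.]))  # last element is masked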
def mean_cosine_distance(labels, predictions, dim, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of arbitrary shape.
predictions: A `Tensor` of the same shape as `labels`.
dim: The dimension along which the cosine distance is computed.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension). Also,
dimension `dim` must be `1`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(radial_diffs,
reduction_indices=[dim,],
keep_dims=True)
mean_distance, update_op = mean(radial_diffs, weights,
None,
None,
name or 'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
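# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `mean_cosine_distance` expects unit-normalized vectors and the axis along
# which the dot product is taken (`dim`).
#
#   labels = tf.nn.l2_normalize(tf.constant([[1., 0.], [0., 1.]]), 1)
#   predictions = tf.nn.l2_normalize(tf.constant([[1., 1.], [0., 1.]]), 1)
#   dist, dist_update = tf.metrics.mean_cosine_distance(
#       labels, predictions, dim=1)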
def mean_per_class_accuracy(labels,
predictions,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates the mean of the per-class accuracies.
Calculates the accuracy for each class, then takes the mean of that.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_accuracy`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_per_class_accuracy` should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_accuracy: A `Tensor` representing the mean per class accuracy.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'mean_accuracy',
(predictions, labels, weights)):
# Check if shape is compatible.
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
total_cm, update_op = _streaming_confusion_matrix(
labels, predictions, num_classes, weights=weights)
def compute_mean_accuracy(name):
"""Compute the mean per class accuracy via the confusion matrix."""
per_row_sum = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
denominator = per_row_sum
# If the value of the denominator is 0, set it to 1 to avoid
# zero division.
denominator = array_ops.where(
math_ops.greater(denominator, 0), denominator,
array_ops.ones_like(denominator))
accuracies = math_ops.div(cm_diag, denominator)
return math_ops.reduce_mean(accuracies, name=name)
mean_accuracy_v = compute_mean_accuracy('mean_accuracy')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_accuracy_v)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_accuracy_v, update_op
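# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `mean_per_class_accuracy` takes integer class ids and the total number of
# classes, then averages the per-class accuracies read off the accumulated
# confusion matrix.
#
#   acc, acc_update = tf.metrics.mean_per_class_accuracy(
#       labels=tf.constant([0, 1, 1, 2]),
#       predictions=tf.constant([0, 1, 2, 2]),
#       num_classes=3)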
def mean_iou(labels,
predictions,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'mean_iou', (predictions, labels, weights)):
# Check if shape is compatible.
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
total_cm, update_op = _streaming_confusion_matrix(labels, predictions,
num_classes, weights)
def compute_mean_iou(name):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
denominator = sum_over_row + sum_over_col - cm_diag
# The mean is only computed over classes that appear in the
# label or prediction tensor. If the denominator is 0, we need to
# ignore the class.
num_valid_entries = math_ops.reduce_sum(math_ops.cast(
math_ops.not_equal(denominator, 0), dtype=dtypes.float32))
# If the value of the denominator is 0, set it to 1 to avoid
# zero division.
denominator = array_ops.where(
math_ops.greater(denominator, 0),
denominator,
array_ops.ones_like(denominator))
iou = math_ops.div(cm_diag, denominator)
# If the number of valid entries is 0 (no classes) we return 0.
result = array_ops.where(
math_ops.greater(num_valid_entries, 0),
math_ops.reduce_sum(iou, name=name) / num_valid_entries,
0)
return result
mean_iou_v = compute_mean_iou('mean_iou')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_iou_v)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_iou_v, update_op
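# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# For semantic segmentation, `mean_iou` is typically fed flattened per-pixel
# class ids; the confusion matrix is accumulated across batches.
#
#   miou, miou_update = tf.metrics.mean_iou(
#       labels=tf.reshape(label_image, [-1]),          # hypothetical tensors
#       predictions=tf.reshape(predicted_image, [-1]),
#       num_classes=21)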
def mean_relative_error(labels, predictions, normalizer, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(
predictions, normalizer)
predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
relative_errors = array_ops.where(
math_ops.equal(normalizer, 0.0),
array_ops.zeros_like(labels),
math_ops.div(math_ops.abs(labels - predictions), normalizer))
return mean(relative_errors, weights, metrics_collections,
updates_collections, name or 'mean_relative_error')
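# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `mean_relative_error` divides each absolute error by `normalizer`; a common
# choice is the label magnitude itself.
#
#   labels = tf.constant([2., 4., 8.])
#   predictions = tf.constant([2.5, 3., 8.])
#   rel_err, rel_err_update = tf.metrics.mean_relative_error(
#       labels, predictions, normalizer=tf.abs(labels))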
def mean_squared_error(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
squared_error = math_ops.square(labels - predictions)
return mean(squared_error, weights, metrics_collections,
updates_collections, name or 'mean_squared_error')
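# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `mean_squared_error` is the streaming counterpart of a plain MSE reduction:
# run the update op once per batch, then read the value tensor.
#
#   mse, mse_update = tf.metrics.mean_squared_error(
#       labels=tf.constant([0., 1., 1.]),
#       predictions=tf.constant([0.2, 0.7, 1.0]))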
def mean_tensor(values, weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
  In contrast to the `mean` function, which returns a scalar with the mean,
  this function returns an average tensor with the same shape as the input
  tensors.
The `mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = _create_local('total_tensor', shape=values.get_shape())
count = _create_local('count_tensor', shape=values.get_shape())
num_values = array_ops.ones_like(values)
if weights is not None:
values, _, weights = _remove_squeezable_dimensions(
predictions=values, labels=None, weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
values = math_ops.multiply(values, weights)
num_values = math_ops.multiply(num_values, weights)
update_total_op = state_ops.assign_add(total, values)
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count, num_values)
def compute_mean(total, count, name):
non_zero_count = math_ops.maximum(count,
array_ops.ones_like(count),
name=name)
return math_ops.truediv(total, non_zero_count, name=name)
mean_t = compute_mean(total, count, 'value')
update_op = compute_mean(update_total_op, update_count_op, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_t)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_t, update_op
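# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# Unlike `mean`, `mean_tensor` keeps the input shape: here the running mean
# is itself a length-3 vector averaged element-wise across batches.
#
#   batch = tf.placeholder(tf.float32, shape=[3])
#   avg, avg_update = tf.metrics.mean_tensor(batch)
#   # Feed successive batches through `avg_update`; `avg` stays shape [3].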
def percentage_below(values, threshold, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `percentage_below` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
is_below_threshold = math_ops.to_float(math_ops.less(values, threshold))
return mean(is_below_threshold,
weights,
metrics_collections,
updates_collections,
name or 'percentage_below_threshold')
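# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `percentage_below` reports the (weighted) fraction of values under a scalar
# threshold, e.g. the share of per-example losses below 0.5.
#
#   frac, frac_update = tf.metrics.percentage_below(
#       values=tf.constant([0.1, 0.4, 0.9]), threshold=0.5)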
def _count_condition(values, weights=None, metrics_collections=None,
updates_collections=None):
"""Sums the weights of cases where the given values are True.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `bool` `Tensor` of arbitrary size.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
check_ops.assert_type(values, dtypes.bool)
count = _create_local('count', shape=[])
values = math_ops.to_float(values)
if weights is not None:
with ops.control_dependencies((
check_ops.assert_rank_in(weights, (0, array_ops.rank(values))),)):
weights = math_ops.to_float(weights)
values = math_ops.multiply(values, weights)
value_tensor = array_ops.identity(count)
update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
if metrics_collections:
ops.add_to_collections(metrics_collections, value_tensor)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value_tensor, update_op
def false_negatives(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(
name, 'false_negatives', (predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_false_negative = math_ops.logical_and(math_ops.equal(labels, True),
math_ops.equal(predictions, False))
return _count_condition(is_false_negative, weights, metrics_collections,
updates_collections)
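# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `false_negatives` counts label-True / prediction-False cases; inputs are
# cast to bool, so 0/1 tensors work directly.
#
#   fn, fn_update = tf.metrics.false_negatives(
#       labels=tf.constant([1, 1, 0, 1]),
#       predictions=tf.constant([1, 0, 0, 0]))   # two false negatives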
def false_negatives_at_thresholds(labels, predictions, thresholds, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes false negatives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `false_negatives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negatives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `false_negatives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negatives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('fn',))
if metrics_collections:
ops.add_to_collections(metrics_collections, values['fn'])
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['fn'])
return values['fn'], update_ops['fn']
def false_positives(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'false_positives', (predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_false_positive = math_ops.logical_and(math_ops.equal(labels, False),
math_ops.equal(predictions, True))
return _count_condition(is_false_positive, weights, metrics_collections,
updates_collections)
def false_positives_at_thresholds(labels, predictions, thresholds, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes false positives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `false_positives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `false_positives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('fp',))
if metrics_collections:
ops.add_to_collections(metrics_collections, values['fp'])
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['fp'])
return values['fp'], update_ops['fp']
def true_negatives_at_thresholds(labels, predictions, thresholds, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes true negatives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `true_negatives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
true_negatives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `true_negatives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'true_negatives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('tn',))
if metrics_collections:
ops.add_to_collections(metrics_collections, values['tn'])
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['tn'])
return values['tn'], update_ops['tn']
def true_positives(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'true_positives', (predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_true_positive = math_ops.logical_and(math_ops.equal(labels, True),
math_ops.equal(predictions, True))
return _count_condition(is_true_positive, weights, metrics_collections,
updates_collections)
def true_positives_at_thresholds(labels, predictions, thresholds, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes true positives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `true_positives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
true_positives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `true_positives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'true_positives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('tp',))
if metrics_collections:
ops.add_to_collections(metrics_collections, values['tp'])
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['tp'])
return values['tp'], update_ops['tp']
def precision(labels, predictions, weights=None,
metrics_collections=None, updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'precision', (predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
true_p, true_positives_update_op = true_positives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
false_p, false_positives_update_op = false_positives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
def compute_precision(tp, fp, name):
return array_ops.where(
math_ops.greater(tp + fp, 0),
math_ops.div(tp, tp + fp),
0,
name)
p = compute_precision(true_p, false_p, 'value')
update_op = compute_precision(
true_positives_update_op, false_positives_update_op, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, p)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return p, update_op
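# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `precision` combines the `true_positives` and `false_positives` counters
# defined above; run the update op so both counters advance together.
#
#   prec, prec_update = tf.metrics.precision(
#       labels=tf.constant([1, 0, 1, 1]),
#       predictions=tf.constant([1, 1, 1, 0]))   # tp=2, fp=1 -> 2/3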
def precision_at_thresholds(labels, predictions, thresholds,
weights=None,
metrics_collections=None,
updates_collections=None, name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `precision`
      should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'precision_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights, includes=('tp', 'fp'))
# Avoid division by zero.
epsilon = 1e-7
def compute_precision(tp, fp, name):
return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name)
prec = compute_precision(values['tp'], values['fp'], 'value')
update_op = compute_precision(
update_ops['tp'], update_ops['fp'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, prec)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return prec, update_op
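# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `precision_at_thresholds` evaluates one precision per threshold, so the
# value tensor has shape [len(thresholds)].
#
#   prec_t, prec_t_update = tf.metrics.precision_at_thresholds(
#       labels=tf.constant([0., 1., 1.]),
#       predictions=tf.constant([0.2, 0.6, 0.9]),
#       thresholds=[0.25, 0.5, 0.75])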
def recall(labels, predictions, weights=None,
metrics_collections=None, updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'recall', (predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
true_p, true_positives_update_op = true_positives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
false_n, false_negatives_update_op = false_negatives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
def compute_recall(true_p, false_n, name):
return array_ops.where(
math_ops.greater(true_p + false_n, 0),
math_ops.div(true_p, true_p + false_n),
0,
name)
rec = compute_recall(true_p, false_n, 'value')
update_op = compute_recall(
true_positives_update_op, false_negatives_update_op, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, rec)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rec, update_op
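# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `recall` mirrors `precision` but divides by tp + fn; the two metrics are
# often evaluated together from the same boolean tensors.
#
#   rec, rec_update = tf.metrics.recall(
#       labels=tf.constant([1, 0, 1, 1]),
#       predictions=tf.constant([1, 1, 1, 0]))   # tp=2, fn=1 -> 2/3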
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
def _select_class_id(ids, selected_id):
"""Filter all but `selected_id` out of `ids`.
Args:
ids: `int64` `Tensor` or `SparseTensor` of IDs.
selected_id: Int id to select.
Returns:
`SparseTensor` of same dimensions as `ids`. This contains only the entries
equal to `selected_id`.
"""
ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
if isinstance(ids, sparse_tensor.SparseTensor):
return sparse_ops.sparse_retain(
ids, math_ops.equal(ids.values, selected_id))
# TODO(ptucker): Make this more efficient, maybe add a sparse version of
# tf.equal and tf.reduce_any?
# Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
ids_last_dim = array_ops.size(ids_shape) - 1
filled_selected_id_shape = math_ops.reduced_shape(
ids_shape, array_ops.reshape(ids_last_dim, [1]))
# Intersect `ids` with the selected ID.
filled_selected_id = array_ops.fill(
filled_selected_id_shape, math_ops.to_int64(selected_id))
result = sets.set_intersection(filled_selected_id, ids)
return sparse_tensor.SparseTensor(
indices=result.indices, values=result.values, dense_shape=ids_shape)
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
"""If class ID is specified, filter all other classes.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
[batch size, k].
selected_id: Int id to select.
Returns:
Tuple of `labels` and `predictions_idx`, possibly with classes removed.
"""
if selected_id is None:
return labels, predictions_idx
return (_select_class_id(labels, selected_id),
_select_class_id(predictions_idx, selected_id))
def _sparse_true_positive_at_k(labels,
predictions_idx,
class_id=None,
weights=None,
name=None):
"""Calculates true positives for recall@k and precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of operation.
Returns:
A [D1, ... DN] `Tensor` of true positive counts.
"""
with ops.name_scope(
name, 'true_positives', (predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(
labels, predictions_idx, class_id)
tp = sets.set_size(sets.set_intersection(predictions_idx, labels))
tp = math_ops.to_double(tp)
if weights is not None:
with ops.control_dependencies((
weights_broadcast_ops.assert_broadcastable(weights, tp),)):
weights = math_ops.to_double(weights)
tp = math_ops.multiply(tp, weights)
return tp
def _streaming_sparse_true_positive_at_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step true positives for recall@k and precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(
name, _at_k_name('true_positive', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
tp = _sparse_true_positive_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_tp = math_ops.to_double(math_ops.reduce_sum(tp))
var = _local_variable(array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_tp, name='update')
def _sparse_false_negative_at_k(labels,
predictions_idx,
class_id=None,
weights=None):
"""Calculates false negatives for recall@k.
  If `class_id` is specified, calculate binary false negatives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
Returns:
A [D1, ... DN] `Tensor` of false negative counts.
"""
with ops.name_scope(
None, 'false_negatives', (predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(labels,
predictions_idx,
class_id)
fn = sets.set_size(sets.set_difference(predictions_idx,
labels,
aminusb=False))
fn = math_ops.to_double(fn)
if weights is not None:
with ops.control_dependencies((
weights_broadcast_ops.assert_broadcastable(weights, fn),)):
weights = math_ops.to_double(weights)
fn = math_ops.multiply(fn, weights)
return fn
def _streaming_sparse_false_negative_at_k(labels,
predictions_idx,
k,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false negatives for recall@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(
name, _at_k_name('false_negative', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
fn = _sparse_false_negative_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))
var = _local_variable(array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fn, name='update')
def recall_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
  `recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
with ops.name_scope(
name, _at_k_name('recall', k, class_id=class_id),
(predictions, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions)
_, top_k_idx = nn.top_k(predictions, k)
return _sparse_recall_at_top_k(
labels=labels,
predictions_idx=top_k_idx,
k=k,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
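# --- Illustrative usage sketch (editor's addition; hypothetical values). ---
# `recall_at_k` takes class logits plus sparse integer labels and internally
# applies `top_k`; here k=2 over a 4-class problem.
#
#   logits = tf.constant([[0.1, 0.3, 0.2, 0.4],
#                         [0.5, 0.1, 0.3, 0.1]])
#   labels = tf.constant([[3], [2]], dtype=tf.int64)
#   rec2, rec2_update = tf.metrics.recall_at_k(labels, logits, k=2)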
def _sparse_recall_at_top_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
Differs from `recall_at_k` in that predictions must be in the form of top `k`
class indices, whereas `recall_at_k` expects logits. Refer to `recall_at_k`
for more details.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and predictions has shape [batch size, k]. The final
dimension contains the top `k` predicted class indices. [D1, ... DN] must
match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
with ops.name_scope(name,
_at_k_name('recall', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
top_k_idx = math_ops.to_int64(predictions_idx)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
fn, fn_update = _streaming_sparse_false_negative_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
metric = math_ops.div(tp, math_ops.add(tp, fn), name=scope)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fn_update), name='update')
if metrics_collections:
ops.add_to_collections(metrics_collections, metric)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
def recall_at_thresholds(labels, predictions, thresholds,
weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'recall_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights, includes=('tp', 'fn'))
# Avoid division by zero.
epsilon = 1e-7
def compute_recall(tp, fn, name):
return math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)
rec = compute_recall(values['tp'], values['fn'], 'value')
update_op = compute_recall(update_ops['tp'], update_ops['fn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, rec)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rec, update_op
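# Illustrative usage sketch (not part of the original module); `tf`, `sess`,
# `labels` and `scores` (values in [0, 1]) are assumed client-side names.
#
#   rec, rec_update = recall_at_thresholds(
#       labels=labels, predictions=scores, thresholds=[0.25, 0.5, 0.75])
#   sess.run(tf.local_variables_initializer())
#   sess.run(rec_update)         # updates tp/fn counts for one batch
#   print(sess.run(rec))         # one recall value per threshold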
def root_mean_squared_error(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
mse, update_mse_op = mean_squared_error(
labels, predictions, weights, None, None,
name or 'root_mean_squared_error')
rmse = math_ops.sqrt(mse)
update_rmse_op = math_ops.sqrt(update_mse_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, rmse)
if updates_collections:
ops.add_to_collections(updates_collections, update_rmse_op)
return rmse, update_rmse_op
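# Illustrative usage sketch (not part of the original module); `tf`, `sess`,
# `targets` and `outputs` are assumed client-side names.
#
#   rmse, rmse_update = root_mean_squared_error(labels=targets,
#                                               predictions=outputs)
#   sess.run(tf.local_variables_initializer())
#   sess.run(rmse_update)        # adds this batch's weighted squared error
#   print(sess.run(rmse))        # sqrt(total / count) accumulated so far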
def sensitivity_at_specificity(
labels, predictions, specificity, weights=None, num_thresholds=200,
metrics_collections=None, updates_collections=None, name=None):
"""Computes the specificity at a given sensitivity.
The `sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
specificity: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if specificity < 0 or specificity > 1:
raise ValueError('`specificity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'sensitivity_at_specificity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
def compute_sensitivity_at_specificity(tp, tn, fp, fn, name):
specificities = math_ops.div(tn, tn + fp + kepsilon)
tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the sensitivity:
return math_ops.div(tp[tf_index],
tp[tf_index] + fn[tf_index] + kepsilon,
name)
sensitivity = compute_sensitivity_at_specificity(
values['tp'], values['tn'], values['fp'], values['fn'], 'value')
update_op = compute_sensitivity_at_specificity(
update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, sensitivity)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return sensitivity, update_op
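# Illustrative usage sketch (not part of the original module); `tf`, `sess`,
# `labels` and `scores` are assumed client-side names.
#
#   sens, sens_update = sensitivity_at_specificity(
#       labels=labels, predictions=scores, specificity=0.95)
#   sess.run(tf.local_variables_initializer())
#   sess.run(sens_update)        # updates the confusion-matrix counts
#   print(sess.run(sens))        # sensitivity at the threshold whose
#                                # specificity is closest to 0.95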
def _expand_and_tile(tensor, multiple, dim=0, name=None):
"""Slice `tensor` shape in 2, then tile along the sliced dimension.
A new dimension is inserted in shape of `tensor` before `dim`, then values are
tiled `multiple` times along the new dimension.
Args:
tensor: Input `Tensor` or `SparseTensor`.
multiple: Integer, number of times to tile.
dim: Integer, dimension along which to tile.
name: Name of operation.
Returns:
`Tensor` result of expanding and tiling `tensor`.
Raises:
ValueError: if `multiple` is less than 1, or `dim` is not in
`[-rank(tensor), rank(tensor)]`.
"""
if multiple < 1:
raise ValueError('Invalid multiple %s, must be > 0.' % multiple)
with ops.name_scope(
name, 'expand_and_tile', (tensor, multiple, dim)) as scope:
# Sparse.
tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
if isinstance(tensor, sparse_tensor.SparseTensor):
if dim < 0:
expand_dims = array_ops.reshape(
array_ops.size(tensor.dense_shape) + dim, [1])
else:
expand_dims = [dim]
expanded_shape = array_ops.concat(
(array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
0,
name='expanded_shape')
expanded = sparse_ops.sparse_reshape(
tensor, shape=expanded_shape, name='expand')
if multiple == 1:
return expanded
return sparse_ops.sparse_concat(
dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
# Dense.
expanded = array_ops.expand_dims(
tensor, dim if (dim >= 0) else (dim - 1), name='expand')
if multiple == 1:
return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
tile_multiples = array_ops.concat(
(ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
return array_ops.tile(expanded, tile_multiples, name=scope)
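# Illustrative shape sketch for `_expand_and_tile` (not part of the original
# module): for a dense `tensor` of shape [2, 3] with multiple=4 and dim=-1, a
# size-1 axis is inserted before the last one and tiled 4 times, yielding
# shape [2, 4, 3]. This is how `labels` of shape [batch_size, num_labels]
# become [batch_size, k, num_labels] in the average-precision code below.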
def _num_relevant(labels, k):
"""Computes number of relevant values for each row in labels.
For labels with shape [D1, ... DN, num_labels], this is the minimum of
`num_labels` and `k`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels].
k: Integer, k for @k metric.
Returns:
Integer `Tensor` of shape [D1, ... DN], where each value is the number of
relevant values for that row.
Raises:
ValueError: if inputs have invalid dtypes or values.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
# For SparseTensor, calculate separate count for each row.
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.minimum(sets.set_size(labels), k, name=scope)
# For dense Tensor, calculate scalar count based on last dimension, and
# tile across labels shape.
labels_shape = array_ops.shape(labels)
labels_size = labels_shape[-1]
num_relevant_scalar = math_ops.minimum(labels_size, k)
return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
def _sparse_average_precision_at_top_k(labels, predictions_idx):
"""Computes average precision@k of predictions with respect to sparse labels.
From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula
for each row is:
AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items
A "row" is the elements in dimension [D1, ... DN] of `predictions_idx`,
`labels`, and the result `Tensors`. In the common case, this is [batch_size].
Each row of the results contains the average precision for that row.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
Values should be in range [0, num_classes).
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range
[0, num_classes).
Returns:
`float64` `Tensor` of shape [D1, ... DN], where each value is the average
precision for that row.
Raises:
ValueError: if the last dimension of predictions_idx is not set.
"""
with ops.name_scope(
None, 'average_precision', (predictions_idx, labels)) as scope:
predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
if predictions_idx.get_shape().ndims == 0:
raise ValueError('The rank of predictions_idx must be at least 1.')
k = predictions_idx.get_shape().as_list()[-1]
if k is None:
raise ValueError('The last dimension of predictions_idx must be set.')
labels = _maybe_expand_labels(labels, predictions_idx)
# Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
# prediction for each k, so we can calculate separate true positive values
# for each k.
predictions_idx_per_k = array_ops.expand_dims(
predictions_idx, -1, name='predictions_idx_per_k')
# Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
labels_per_k = _expand_and_tile(
labels, multiple=k, dim=-1, name='labels_per_k')
# The following tensors are all of shape [D1, ... DN, k], containing values
# per row, per k value.
# `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at
# that k value is correct, 0 otherwise. This is the "rel_{i}" term from
# the formula above.
# `tp_per_k` (int32) - True positive counts.
# `retrieved_per_k` (int32) - Number of predicted values at each k. This is
# the precision denominator.
# `precision_per_k` (float64) - Precision at each k. This is the "P_{i}"
# term from the formula above.
# `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
# precisions at all k for which relevance indicator is true.
relevant_per_k = _sparse_true_positive_at_k(
labels_per_k, predictions_idx_per_k, name='relevant_per_k')
tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
retrieved_per_k = math_ops.cumsum(
array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
precision_per_k = math_ops.div(
math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k),
name='precision_per_k')
relevant_precision_per_k = math_ops.multiply(
precision_per_k, math_ops.to_double(relevant_per_k),
name='relevant_precision_per_k')
# Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
precision_sum = math_ops.reduce_sum(
relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')
# Divide by number of relevant items to get average precision. These are
# the "num_relevant_items" and "AveP" terms from the formula above.
num_relevant_items = math_ops.to_double(_num_relevant(labels, k))
return math_ops.div(precision_sum, num_relevant_items, name=scope)
def _streaming_sparse_average_precision_at_top_k(labels,
predictions_idx,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`sparse_average_precision_at_top_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
the true positives and false positives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
Values should be in range [0, num_classes).
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
dimension contains the top `k` predicted class indices. [D1, ... DN] must
match `labels`. Values should be in range [0, num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
with ops.name_scope(name, 'average_precision_at_top_k',
(predictions_idx, labels, weights)) as scope:
# Calculate per-example average precision, and apply weights.
average_precision = _sparse_average_precision_at_top_k(
predictions_idx=predictions_idx, labels=labels)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_double(weights), average_precision)
average_precision = math_ops.multiply(average_precision, weights)
# Create accumulation variables and update ops for max average precision and
# total average precision.
with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
# `max` is the max possible precision. Since max for any row is 1.0:
# - For the unweighted case, this is just the number of rows.
# - For the weighted case, it's the sum of the weights broadcast across
# `average_precision` rows.
max_var = _local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=max_scope)
if weights is None:
batch_max = math_ops.to_double(
array_ops.size(average_precision, name='batch_max'))
else:
batch_max = math_ops.reduce_sum(weights, name='batch_max')
max_update = state_ops.assign_add(max_var, batch_max, name='update')
with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
total_var = _local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=total_scope)
batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
total_update = state_ops.assign_add(total_var, batch_total, name='update')
# Divide total by max to get mean, for both vars and the update ops.
mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean')
update = _safe_scalar_div(total_update, max_update, name=scope)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_average_precision)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return mean_average_precision, update
def sparse_average_precision_at_k(labels,
predictions,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if k is invalid.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(
name, _at_k_name('average_precision', k),
(predictions, labels, weights)) as scope:
# Calculate top k indices to produce [D1, ... DN, k] tensor.
_, predictions_idx = nn.top_k(predictions, k)
return _streaming_sparse_average_precision_at_top_k(
labels=labels,
predictions_idx=predictions_idx,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
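# Illustrative usage sketch (not part of the original module); `tf`, `sess`,
# `label_ids` (int64 class IDs) and `logits` are assumed client-side names.
#
#   mean_ap, ap_update = sparse_average_precision_at_k(
#       labels=label_ids, predictions=logits, k=10)
#   sess.run(tf.local_variables_initializer())
#   sess.run(ap_update)          # adds this batch's per-row AveP to the total
#   print(sess.run(mean_ap))     # total / max accumulated so far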
def _sparse_false_positive_at_k(labels,
predictions_idx,
class_id=None,
weights=None):
"""Calculates false positives for precision@k.
  If `class_id` is specified, calculate binary false positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels_sparse`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
Returns:
A [D1, ... DN] `Tensor` of false positive counts.
"""
with ops.name_scope(
None, 'false_positives', (predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(labels,
predictions_idx,
class_id)
fp = sets.set_size(sets.set_difference(
predictions_idx, labels, aminusb=True))
fp = math_ops.to_double(fp)
if weights is not None:
with ops.control_dependencies((
weights_broadcast_ops.assert_broadcastable(weights, fp),)):
weights = math_ops.to_double(weights)
fp = math_ops.multiply(fp, weights)
return fp
def _streaming_sparse_false_positive_at_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false positives for precision@k.
  If `class_id` is specified, calculate binary false positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(
name, _at_k_name('false_positive', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
fp = _sparse_false_positive_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))
var = _local_variable(array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fp, name='update')
def precision_at_top_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
Differs from `sparse_precision_at_k` in that predictions must be in the form
of top `k` class indices, whereas `sparse_precision_at_k` expects logits.
Refer to `sparse_precision_at_k` for more details.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, k].
The final dimension contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`.
k: Integer, k for @k metric. Only used for the default op name.
class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions_idx)
top_k_idx = math_ops.to_int64(predictions_idx)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
fp, fp_update = _streaming_sparse_false_positive_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
metric = math_ops.div(tp, math_ops.add(tp, fp), name=scope)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fp_update), name='update')
if metrics_collections:
ops.add_to_collections(metrics_collections, metric)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
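# Illustrative usage sketch (not part of the original module). Unlike
# `sparse_precision_at_k`, the caller supplies the top-k indices directly;
# `tf`, `sess`, `label_ids` and `logits` are assumed client-side names.
#
#   _, top_5_idx = tf.nn.top_k(logits, k=5)
#   prec, prec_update = precision_at_top_k(
#       labels=label_ids, predictions_idx=top_5_idx, k=5)
#   sess.run(tf.local_variables_initializer())
#   sess.run(prec_update)
#   print(sess.run(prec))        # tp / (tp + fp) accumulated so far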
def sparse_precision_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is in the top-k highest
`predictions`, and computing the fraction of them for which `class_id` is
indeed a correct label.
If `class_id` is not specified, we'll calculate precision as how often on
average a class among the top-k classes with the highest predicted values
of a batch entry is correct and can be found in the label for that entry.
`sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
(predictions, labels, weights)) as scope:
_, top_k_idx = nn.top_k(predictions, k)
return precision_at_top_k(
labels=labels,
predictions_idx=top_k_idx,
k=k,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
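# Illustrative usage sketch (not part of the original module); `tf`, `sess`,
# `label_ids` and `logits` are assumed client-side names, and class 3 is an
# arbitrary example class.
#
#   prec3, prec3_update = sparse_precision_at_k(
#       labels=label_ids, predictions=logits, k=5, class_id=3)
#   sess.run(tf.local_variables_initializer())
#   sess.run(prec3_update)       # binary precision@5 restricted to class 3
#   print(sess.run(prec3))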
def specificity_at_sensitivity(
labels, predictions, sensitivity, weights=None, num_thresholds=200,
metrics_collections=None, updates_collections=None, name=None):
"""Computes the specificity at a given sensitivity.
The `specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
sensitivity: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if sensitivity < 0 or sensitivity > 1:
raise ValueError('`sensitivity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'specificity_at_sensitivity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):
"""Computes the specificity at the given sensitivity.
Args:
tp: True positives.
tn: True negatives.
fp: False positives.
fn: False negatives.
name: The name of the operation.
Returns:
The specificity using the aggregated values.
"""
sensitivities = math_ops.div(tp, tp + fn + kepsilon)
# We'll need to use this trick until tf.argmax allows us to specify
# whether we should use the first or last index in case of ties.
min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
indices_at_minval = math_ops.equal(
math_ops.abs(sensitivities - sensitivity), min_val)
indices_at_minval = math_ops.to_int64(indices_at_minval)
indices_at_minval = math_ops.cumsum(indices_at_minval)
tf_index = math_ops.argmax(indices_at_minval, 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the specificity:
return math_ops.div(tn[tf_index],
tn[tf_index] + fp[tf_index] + kepsilon,
name)
specificity = compute_specificity_at_sensitivity(
values['tp'], values['tn'], values['fp'], values['fn'], 'value')
update_op = compute_specificity_at_sensitivity(
update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, specificity)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return specificity, update_op
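# Illustrative usage sketch (not part of the original module); `tf`, `sess`,
# `labels` and `scores` are assumed client-side names.
#
#   spec, spec_update = specificity_at_sensitivity(
#       labels=labels, predictions=scores, sensitivity=0.9)
#   sess.run(tf.local_variables_initializer())
#   sess.run(spec_update)        # updates the confusion-matrix counts
#   print(sess.run(spec))        # specificity at the threshold whose
#                                # sensitivity is closest to 0.9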
| [
"[email protected]"
]
| |
33daf81383f07a60e1205b82c0c803721e8fd23e | 25985aeeee54373d26a164e4cc6a014770e3ebf3 | /windows/w3af/w3af/core/ui/gtkUi/.svn/text-base/exploittab.py.svn-base | 7ad6f040980ac9f8abcf7cb7387f8c80945f7141 | []
| no_license | sui84/tools | 4b750dae90940fbe3a226cba72dc071d8fb88b7c | 651cc08eb50199ce1044c684dbf714ea26df6432 | refs/heads/master | 2021-01-22T19:22:26.964580 | 2017-08-20T15:23:38 | 2017-08-20T15:23:38 | 100,774,276 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 29,792 | '''
exploittab.py
Copyright 2007 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import gtk, gobject
from . import prompt, helpers, entries, confpanel
from core.ui.gtkUi.pluginEditor import pluginEditor
import core.data.kb.knowledgeBase as kb
from core.data.kb.vuln import vuln as vulnType
from core.controllers.w3afException import w3afException, w3afMustStopException
import operator
class Shells(gtk.TreeView):
'''The list of shells produced from vulnerabilities.
@author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
'''
def __init__(self, w3af):
self.w3af = w3af
# create the ListStore, with the shell name and id
self.liststore = gtk.ListStore(str, str)
self.listholder = {}
# create the TreeView using liststore
super(Shells,self).__init__(self.liststore)
# create a TreeViewColumn for the text
tvcolumn = gtk.TreeViewColumn('Shells')
cell = gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'text', 0)
self.append_column(tvcolumn)
self.connect('row-activated', self.useShell)
gobject.timeout_add(500, self._update)
self.show()
def _update(self):
'''Updates the list of shells.
@return: True, to keep gobject.timeout_add calling it.
'''
shells = kb.kb.getAllShells()
for shell in shells:
shellid = str(id(shell))
if shellid not in self.listholder:
try:
self.liststore.append([str(shell), shellid])
except w3afException, w3:
msg = _("An error ocurren while generating the shell object: ") + str(w3)
dlg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, msg)
dlg.destroy()
# I always perform this because I just want to be warned once
self.listholder[shellid] = shell
return True
def useShell(self, treeview, path, view_column):
'''Raises a prompt dialog to use the shell.'''
shellid = self.liststore[path][1]
shell = self.listholder[shellid]
try:
title = "Shell - " + shell.getRemoteSystem()
except w3afException, w3:
msg = _("Failed to get the remote system name from the shell object.\n")
msg += _("Original exception: ") + str(w3)
            dlg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, msg)
            dlg.run()
            dlg.destroy()
else:
promptText = shell.getRemoteUser()+'@'+shell.getRemoteSystemName()
prompt.PromptDialog( title, promptText, shell.generic_user_input)
class ExploitAllDialog(gtk.Dialog):
    '''A dialog to select which attack plugins to run against all found vulns.
@author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
'''
def __init__(self, w3af):
super(ExploitAllDialog,self).__init__("Multiple Exploit", None, gtk.DIALOG_MODAL,
(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_EXECUTE,gtk.RESPONSE_OK))
self.liststore = gtk.ListStore(str, gobject.TYPE_BOOLEAN)
# just build the tree with the plugin names
for plugin in sorted(w3af.getPluginList("attack")):
self.liststore.append([plugin, 1])
# create the TreeView using liststore
treeview = gtk.TreeView(self.liststore)
self.vbox.pack_start(treeview)
# create a TreeViewColumn for the text
tvcolumn = gtk.TreeViewColumn(_('Exploits'))
cell = gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'text', 0)
treeview.append_column(tvcolumn)
# create a TreeViewColumn for the checkbox
tvcolumn = gtk.TreeViewColumn(_('Active'))
cell = gtk.CellRendererToggle()
cell.set_property('activatable', True)
cell.connect('toggled', self._toggle)
tvcolumn.pack_start(cell, False)
tvcolumn.add_attribute(cell, 'active', 1)
treeview.append_column(tvcolumn)
# stop on first
self.but_sof = gtk.CheckButton(_("First successful"))
if hasattr(self.but_sof, "set_tooltip_text"):
self.but_sof.set_tooltip_text(_("Stop on first successful exploit"))
self.vbox.pack_start(self.but_sof)
# the cancel button
but = self.action_area.get_children()[1]
but.connect("clicked", lambda x: self.destroy())
# the ok button
but = self.action_area.get_children()[0]
but.connect("clicked", self._ok)
self.connect("delete-event", lambda x,y: self.destroy())
self.activatedPlugins = None
self.stopOnFirst = None
self.show_all()
def _ok(self, w):
'''Collects the information.'''
self.activatedPlugins = [name for (name,act) in self.liststore if act]
self.stopOnFirst = self.but_sof.get_active()
self.destroy()
def _toggle(self, cell, path):
'''Toggles the plugin on/off.
@param cell: the cell that generated the signal.
@param path: the path that clicked the user.
'''
listrow = self.liststore[path]
listrow[1] = not listrow[1]
class ExploitTree(gtk.TreeView):
'''A list showing all the plugins of "attack" type.
@param w3af: The main core class.
@author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
'''
def __init__(self, w3af):
self.w3af = w3af
# create the ListStore, with the plugin name twice (the first could
# go bold, the second is the original name always)
self.liststore = gtk.ListStore(str, str)
# just build the tree with the plugin names
for plugin in sorted(w3af.getPluginList("attack")):
self.liststore.append([plugin, plugin])
# we will not ask for the plugin instances until needed, we'll
# keep them here:
self.plugin_instances = {}
# create the TreeView using liststore
super(ExploitTree,self).__init__(self.liststore)
# signals
self.connect('button-release-event', self.popup_menu)
self.connect('cursor-changed', self._changedSelection)
# create a TreeViewColumn for the text
tvcolumn = gtk.TreeViewColumn(_('Exploits'))
cell = gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'markup', 0)
self.append_column(tvcolumn)
# drag and drop setup, this is the SOURCE
target = [("explot-activ", 0, 1)]
self.enable_model_drag_source(gtk.gdk.BUTTON1_MASK, target, gtk.gdk.ACTION_COPY)
#self.set_enable_tree_lines(True)
self.show()
def setFilter(self, vuln):
new_liststore = gtk.ListStore(str, str)
for pname in sorted(self.w3af.getPluginList("attack")):
exploit = self.w3af.getPluginInstance(pname, "attack")
thisvulns = getExploitableVulns(exploit)
markedname = ("<b>%s</b>" % pname) if vuln in thisvulns else pname
new_liststore.append([markedname, pname])
self.set_model(new_liststore)
self.liststore = new_liststore
def _changedSelection(self, *w):
'''Changed which exploit is selected.'''
exploit = self.getSelectedExploit()
self.vulnerabs.setFilter(exploit)
# un-bold the rest
for row in self.liststore:
if row[1] != exploit.pname:
row[0] = row[1]
def getSelectedExploit(self):
'''Returns the selected exploit.
@return: The selected exploit.
'''
(path, column) = self.get_cursor()
if path is None:
return None
# Get the information about the click
plugin = self.getPluginInstance(path)
return plugin
def popup_menu( self, tv, event ):
'''Shows a menu when you right click on a plugin.
@param tv: the treeview.
@parameter event: The GTK event
'''
if event.button != 3:
return
(path, column) = tv.get_cursor()
# Is it over a plugin name ?
if path is not None and len(path) == 1:
# Get the information about the click
plugin = self.getPluginInstance(path)
pname = self.liststore[path][1]
# Ok, now I show the popup menu !
# Create the popup menu
gm = gtk.Menu()
# And the items
e = gtk.MenuItem(_("Edit plugin..."))
e.connect('activate', self._handleEditPluginEvent, pname, path)
gm.append( e )
e = gtk.MenuItem(_("Configure plugin..."))
e.connect('activate', self._configureExploit, plugin, pname)
gm.append( e )
e = gtk.MenuItem(_("Exploit ALL vulns"))
e.connect('activate', self._exploitAll, pname, False)
gm.append( e )
e = gtk.MenuItem(_("Exploit all until first successful"))
e.connect('activate', self._exploitAll, pname, True)
gm.append( e )
gm.show_all()
gm.popup( None, None, None, event.button, event.time)
def _handleEditPluginEvent(self, widget, pluginName, path):
'''
I get here when the user right clicks on a plugin name, then he clicks on "Edit..."
This method calls the plugin editor with the corresponding parameters.
'''
def f(t, n):
self._finishedEditingPlugin(path, pluginName)
pluginEditor("attack", pluginName, f)
def _finishedEditingPlugin(self, path, pluginName):
'''
This is a callback that is called when the plugin editor finishes.
'''
del self.plugin_instances[path]
self.w3af.reloadModifiedPlugin('attack', pluginName)
def _exploitAll(self, widget, pname, stoponfirst):
'''Exploit all the vulns.'''
effectivelyExploitAll(self.w3af, [pname], stoponfirst)
def _configureExploit(self, widget, plugin, pname):
'''Configure the exploit plugin.'''
title = "Configure " + pname
confpanel.ConfigDialog(title, self.w3af, plugin, showDesc=True)
def getPluginInstance(self, path):
'''Caches the plugin instance.
@param path: where the user is in the plugin list
@return The plugin
'''
try:
return self.plugin_instances[path]
except KeyError:
pass
# path can be a tuple of one or two values here
pname = self.liststore[path][1]
plugin = self.w3af.getPluginInstance(pname, "attack")
plugin.pname = pname
plugin.ptype = "attack"
self.plugin_instances[path] = plugin
return plugin
class VulnerabList(gtk.TreeView):
'''A tree showing all the found vulnerabilities.
@param w3af: The w3af core.
@param exploitlist: The widget that keeps the list of exploits
@author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
'''
def __init__(self, w3af, exploitlist):
self.w3af = w3af
self.exploitlist = exploitlist
# simple empty List Store
# columns: the string to show, the string to order, the key
# for the plugin instance, and the icon
self.liststore = gtk.ListStore(str, str, str, gtk.gdk.Pixbuf)
gtk.TreeView.__init__(self, self.liststore)
# the text & icon column
tvcolumn = gtk.TreeViewColumn(_("Vulnerabilities"))
cell = gtk.CellRendererPixbuf()
tvcolumn.pack_start(cell, expand=False)
tvcolumn.add_attribute(cell, "pixbuf", 3)
cell = gtk.CellRendererText()
tvcolumn.pack_start(cell, expand=True)
tvcolumn.add_attribute(cell, "markup", 0)
self.append_column(tvcolumn)
# here we will hold the instances, the key will be stored in the store
self.instances = {}
self.listholder = set()
# initial filters
self.applicable = []
# drag and drop setup, this is the DESTINATION
target = [("explot-activ", 0, 1)]
self.enable_model_drag_dest(target, gtk.gdk.ACTION_COPY)
self.connect("drag-data-received", self._dragDropped)
self.connect('cursor-changed', self._changedSelection)
# get the knowledge base and go live
self.fullkb = kb.kb.dump()
gobject.timeout_add(500, self._updateList)
self.lastcheck = False
self.show()
def _changedSelection(self, *w):
'''Changed which exploit is selected.'''
(path, column) = self.get_cursor()
vuln = self.getInstance(path)
self.exploitlist.setFilter(vuln)
# un-bold the rest
selected = vuln.getName()
for row in self.liststore:
if row[1] != selected:
row[0] = row[1]
def setFilter(self, exploit):
        '''Sets a new filter and updates the list.
        @param exploit: the exploit whose exploitable vulnerabilities are highlighted.
'''
vulns = getExploitableVulns(exploit)
if vulns is None:
self.applicable = []
else:
self.applicable = vulns
new_liststore = gtk.ListStore(str, str, str, gtk.gdk.Pixbuf)
new_listholder = set()
self._updateList(new_liststore, new_listholder)
self.set_model(new_liststore)
self.liststore = new_liststore
self.listholder = new_listholder
def _filterKB(self):
'''Calculates the difference between the KB and the list.
        This way, only the nodes that are new are added to the list.
@return: The filtered KB.
'''
# let's filter the real kb, to see what we should add
filteredkb = []
# iterate the first layer, plugin names
for pluginname, plugvalues in self.fullkb.items():
# iterate the second layer, variable names
for variabname, variabobjects in plugvalues.items():
# iterate the third layer, the variable objects
if isinstance(variabobjects, list):
for obj in variabobjects:
if type(obj) == vulnType:
severity = obj.getSeverity()
filteredkb.append((obj, severity))
return filteredkb
def _getBestObjName(self, obj):
'''
@return: The best obj name possible
'''
if hasattr(obj, "getName"):
realname = obj.getName()
else:
realname = repr(obj)
if obj in self.applicable:
showname = "<b>%s</b>" % realname
else:
showname = "%s" % realname
return showname, realname
def _updateList(self, liststore=None, listholder=None):
'''Updates the GUI with the KB.
@return: True to keep being called by gobject.
'''
# if the core is not running, don't have anything to update
if not self.w3af.isRunning():
if self.lastcheck:
return True
else:
self.lastcheck = True
self.lastcheck = False
# get the filtered knowledge base info
filteredKB = self._filterKB()
if liststore is None:
liststore = self.liststore
listholder = self.listholder
new_ones = []
for obj, severity in filteredKB:
idinstance = str(id(obj))
if idinstance in listholder:
continue
# it's new!
(showname, realname) = self._getBestObjName(obj)
newicon = helpers.KB_ICONS.get(("vuln", severity))
if newicon is not None:
newicon = newicon.get_pixbuf()
new_ones.append(
(idinstance, obj, showname, realname, newicon))
if new_ones:
self._addVulns(listholder, liststore, new_ones)
return True
def _addVulns(self, listholder, liststore, vulns):
'''Adds an element to the liststore.
@param listholder: the holder to check for instances
@param liststore: the list itself
@param vulns: what to add
'''
# order it by realname, in reverse to be able to do nice pops
vulns.sort(key=operator.itemgetter(3), reverse=True)
# add to listholder and instances
for idinstance, obj, showname, realname, newicon in vulns:
listholder.add(idinstance)
self.instances[idinstance] = obj
# add to the liststore, inserting into the right place to keep order
storelen = len(liststore)
ind = 0
idinstance, obj, showname, realname, newicon = vulns.pop()
while ind < storelen:
prvshowname,prvrealname, vln,icn = liststore[ind]
if realname <= prvrealname:
liststore.insert(ind, (showname,realname,idinstance,newicon))
storelen += 1
try:
idinstance, obj, showname, realname, newicon = vulns.pop()
except IndexError:
break
ind += 1
else:
# we had some more, add them at the end
liststore.append((showname,realname,idinstance,newicon))
for idinstance, obj, showname, realname, newicon in vulns[::-1]:
liststore.append((showname,realname,idinstance,newicon))
def getInstance(self, path):
'''Extracts the instance from the tree.
@param path: where the user is in the tree
@return The instance
'''
instanckey = self.liststore[path][2]
instance = self.instances.get(instanckey)
return instance
def _dragDropped(self, tv, drag_context, x, y, selection_data, info, timestamp):
'''Something was dropped (after a drag) on us.'''
droppoint = tv.get_dest_row_at_pos(x, y)
if droppoint is None:
return True
# collect info about source and dest
(destpath, where) = droppoint
sourcepath = self.exploitlist.get_cursor()[0]
sourcerow = self.exploitlist.liststore[sourcepath]
# it should select a destination row
if where not in (gtk.TREE_VIEW_DROP_INTO_OR_AFTER, gtk.TREE_VIEW_DROP_INTO_OR_BEFORE):
self.w3af.mainwin.sb(_("You must drop into a row, not in the middle of two"))
return
# get real objects
exploit = self.exploitlist.getPluginInstance(sourcepath)
dstvuln = self.getInstance(destpath)
if dstvuln is None:
self.w3af.mainwin.sb(_("You must select a vulnerability as destination"))
return
self._executeExploit(exploit, dstvuln)
return
def _executeExploit(self, expl, vuln):
'''Exploits a vulnerability.
This raises a text dialog that informs how the exploit
is going until it finishes.
This method is going to:
a) Create the TextDialog
b) spawn a thread to launch the exploit process
c) spawn a thread to read from the output manager queue
b and c both write messages to the TextDialog.
@param expl: the exploit to use
@param vuln: the vulnerability to exploit
'''
dlg = entries.TextDialog("Exploit!")
# Start the generator that writes the messages from output manager
console_task = helpers.write_console_messages(dlg)
gobject.idle_add(console_task.next)
# Start the generator that launches the exploit
exploit_task = self._launch_exploit(dlg, expl, vuln)
gobject.idle_add(exploit_task.next)
return
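    # Aside: the two generators above work because of how gobject.idle_add() treats
    # its callback: the callable is re-invoked whenever GTK is idle for as long as it
    # returns a truthy value, and unscheduled once it returns a falsy one.  Passing
    # generator.next as the callback therefore turns a generator into a cooperative
    # task: "yield True" hands control back to GTK and keeps the task alive, while
    # "yield False" ends it.  A minimal sketch (hypothetical code, not part of w3af):
    #
    #     def example_task(dlg):
    #         dlg.addMessage("working...\n")
    #         yield True      # let GTK process pending events, stay scheduled
    #         dlg.addMessage("done\n")
    #         yield False     # a falsy value removes the idle callback
    #
    #     gobject.idle_add(example_task(dlg).next)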
def _launch_exploit(self, dlg, expl, vuln):
'''
Launch the exploit and write messages to the TextDialog.
@parameter dlg: The TextDialog.
'''
# get the info, and see if we can go for it
dlg.addMessage("Checking suitability...\n")
vuln_id_list = vuln.getId()
yield True
try:
canexploit = expl.canExploit(vuln_id_list)
except w3afException, e:
dlg.addMessage(_("\nERROR: "))
dlg.addMessage(str(e) + '\n')
dlg.done() # set button to sensitive
dlg.dialog_run() # wait for user response
yield False
if not canexploit:
dlg.addMessage(_("Sorry, this attack plugin can not exploit this vulnerability\n"))
dlg.done() # set button to sensitive
dlg.dialog_run() # wait for user response
yield False
# ok, go for it!
dlg.addMessage(_("Ok, exploiting...\n"))
yield True
try:
expl.exploit()
yield True # print the console messages to the dialog
except w3afException, e:
dlg.addMessage(str(e) + '\n')
else:
dlg.addMessage(_("Done\n"))
yield True
dlg.done() # set button to sensitive
dlg.dialog_run() # wait for user response
yield False
class Proxies(gtk.Label):
'''Dummy class to alert that this will be done later.
@author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
'''
def __init__(self):
msg = "The 'Proxies' functionality\nwill be implemented\nin the future."
super(Proxies,self).__init__(msg)
self.set_justify(gtk.JUSTIFY_CENTER)
self.show()
def getExploitableVulns(exploit):
'''Returns the exploitable vulnerabilities.
@param exploit: the exploit to search.
'''
try:
vulns = exploit.getExploitableVulns()
except w3afException:
print "WARNING: The %r exploit has no getExploitableVulns method!" % exploit
vulns = []
return vulns
def effectivelyExploitAll(w3af, activatedPlugins, stopOnFirst):
'''Exploit all the vulnerabilities.
Just like in the 1-to-1 exploit, I'll create two generators that will perform the work
in a "threaded" way.
@param w3af: the core
@param activatedPlugins: Which plugins are to be used.
@param stopOnFirst: if the exploit should stop in the first exploited vuln.
'''
dlg = entries.TextDialog("Multiple Exploit!")
# Start the generator that writes the messages from output manager
console_task = helpers.write_console_messages(dlg)
gobject.idle_add(console_task.next)
# Start the generator that launches the exploit
exploit_task = _launch_exploit_all(dlg, w3af, activatedPlugins, stopOnFirst)
gobject.idle_add(exploit_task.next)
def _launch_exploit_all(dlg, w3af, activatedPlugins, stopOnFirst):
'''
A generator that will perform the exploitation of all the vulnerabilities.
@param dlg: The dialog where I'm going to write the messages
@param w3af: the core
@param activatedPlugins: Which plugins are to be used.
@param stopOnFirst: if the exploit should stop in the first exploited vuln.
'''
for exploitname in activatedPlugins:
dlg.addMessage(_("\nExploiting %r...\n") % exploitname)
exploit = w3af.getPluginInstance(exploitname, "attack")
vulns = getExploitableVulns(exploit)
dlg.addMessage(_(" %d vulnerabilites to exploit\n") % len(vulns))
yield True
for vuln in vulns:
# Let GTK handle events, I want a responsive GUI!
yield True
            # check whether this exploit can be used against the vulnerability
            dlg.addMessage(_("Checking suitability for vuln %r...\n") % vuln.getName())
try:
canexploit = exploit.canExploit(vuln.getId())
except w3afException, e:
dlg.addMessage(_("\nERROR: "))
dlg.addMessage(str(e) + '\n')
dlg.done()
dlg.dialog_run()
yield False
except w3afMustStopException, wmse:
dlg.addMessage(_("\nERROR: "))
dlg.addMessage(str(wmse) + '\n')
dlg.done()
dlg.dialog_run()
yield False
if not canexploit:
dlg.addMessage(_(" nop\n"))
yield True
continue
dlg.addMessage(_(" ok\n"))
# exploitable, go for it!
dlg.addMessage(_("Exploiting...\n"))
try:
exploit.exploit()
except w3afException, e:
dlg.addMessage(str(e) + '\n')
yield True
continue
except w3afMustStopException, wmse:
dlg.addMessage(_("\nERROR:"))
dlg.addMessage(str(wmse) + '\n')
dlg.done()
dlg.dialog_run()
yield False
# Let GTK handle events, I want a responsive GUI!
yield True
            # it was successful!
if stopOnFirst:
dlg.addMessage(_("Done\n"))
dlg.done()
dlg.dialog_run()
yield False
dlg.addMessage(_("Done\n"))
dlg.done()
dlg.dialog_run()
yield False
class ExploitBody(entries.RememberingHPaned):
'''Body of the exploit tab.
@param w3af: the Core instance.
@author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
'''
def __init__(self, w3af):
super(ExploitBody,self).__init__(w3af, "pane-exploitbody")
self.w3af = w3af
self.panels = {}
# This is the index to use in the message diverter
#
        # The first window that is popped up gets 0 and starts from there
# that window consumes messages and increases this number.
#
        # The next window will show messages starting from where the
# other window left the pointer.
#
# All the message_index handling is done with:
# - self.get_message_index()
# - self.inc_message_index()
#
self._message_index = 0
kb.kb.save('get_message_index', 'get_message_index', self.get_message_index)
kb.kb.save('inc_message_index', 'inc_message_index', self.inc_message_index)
# left & right
exploitvuln = self._buildExplVuln()
interac = self._buildInteraction()
self.panels["exploitvuln"] = exploitvuln
self.panels["interac"] = interac
# pack it all and show
self.pack1(exploitvuln)
self.pack2(interac)
self.panactiv = dict((x,True) for x in self.panels)
self.show()
def inc_message_index(self):
self._message_index += 1
def get_message_index(self):
return self._message_index
def _buildExplVuln(self):
'''The pane with the exploit list and vulnerabilities tree.'''
pan = entries.RememberingHPaned(self.w3af, "pane-epxlvuln", 200)
# left
exploitlist = ExploitTree(self.w3af)
scrollwin1 = gtk.ScrolledWindow()
scrollwin1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrollwin1.add_with_viewport(exploitlist)
scrollwin1.show()
        # right
interac = VulnerabList(self.w3af, exploitlist)
exploitlist.vulnerabs = interac
scrollwin2 = gtk.ScrolledWindow()
scrollwin2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrollwin2.add_with_viewport(interac)
scrollwin2.show()
# pack it all and show
pan.pack1(scrollwin1)
pan.pack2(scrollwin2)
pan.show()
return pan
def _buildInteraction(self):
'''The pane with the shells and proxies list.'''
pan = entries.RememberingVPaned(self.w3af, "pane-explinteraction")
# left
shells = Shells(self.w3af)
scrollwin1 = gtk.ScrolledWindow()
scrollwin1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrollwin1.add_with_viewport(shells)
scrollwin1.show()
        # right
proxies = Proxies()
scrollwin2 = gtk.ScrolledWindow()
scrollwin2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrollwin2.add_with_viewport(proxies)
scrollwin2.show()
# pack it all and show
pan.pack1(scrollwin1)
pan.pack2(scrollwin2)
pan.show()
return pan
def togglePanels(self, panel, active):
'''Turn on and off the panels.
@param panel: The panel to turn on and off
@param active: If it should be activated or deactivated
'''
widg = self.panels[panel]
if active:
widg.show()
else:
widg.hide()
self.panactiv[panel] = active
def exploitAll(self):
'''Exploit all vulns with all plugins.'''
ea = ExploitAllDialog(self.w3af)
resp = ea.run()
if resp != gtk.RESPONSE_OK:
return
effectivelyExploitAll(self.w3af, ea.activatedPlugins, ea.stopOnFirst)
return
| [
"[email protected]"
]
| ||
9057b02d2ebad2cfc59b5649da5d1eeb5780b432 | 8d5f49fa1fda8ffc473e7f5a62786c77838a5820 | /website/drawquest/dbrouters.py | 23bbfb4be240253be8526040cf768de593b23d88 | [
"BSD-3-Clause"
]
| permissive | MichaelBechHansen/drawquest-web | dfc6f5d9541860a5df23db678e82564a230bd42e | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | refs/heads/master | 2021-01-14T10:30:10.861222 | 2015-11-10T03:13:42 | 2015-11-10T03:13:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | from django.conf import settings
class DatabaseAppRouter(object):
"""
A router to control all database operations on models for different
databases.
In case an app is not set in settings.DATABASE_APPS_MAPPING, the router
will fallback to the `default` database.
Settings example:
DATABASE_APPS_MAPPING = {'app1': 'db1', 'app2': 'db2'}
"""
def db_for_read(self, model, **hints):
"""" Point all read operations to the specific database. """
return settings.DATABASE_APPS_MAPPING.get(model._meta.app_label)
def db_for_write(self, model, **hints):
""" Point all write operations to the specific database. """
return settings.DATABASE_APPS_MAPPING.get(model._meta.app_label)
def allow_relation(self, obj1, obj2, **hints):
""" Allow any relation between apps that use the same database. """
db_obj1 = settings.DATABASE_APPS_MAPPING.get(obj1._meta.app_label)
db_obj2 = settings.DATABASE_APPS_MAPPING.get(obj2._meta.app_label)
if db_obj1 and db_obj2:
if db_obj1 == db_obj2:
return True
else:
return False
def allow_syncdb(self, db, model):
""" Make sure that apps only appear in the related database. """
if model._meta.app_label == 'south':
return True
elif db in settings.DATABASE_APPS_MAPPING.values():
return settings.DATABASE_APPS_MAPPING.get(model._meta.app_label) == db
elif settings.DATABASE_APPS_MAPPING.has_key(model._meta.app_label):
return False
elif db != 'default':
return False
return True
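# A minimal sketch of how a router like this gets wired into the project settings
# (the dotted path below is illustrative, not taken from this repository):
#
#     DATABASE_APPS_MAPPING = {'app1': 'db1', 'app2': 'db2'}
#     DATABASE_ROUTERS = ['drawquest.dbrouters.DatabaseAppRouter']
#
# Reads and writes for models in 'app1' then go to the 'db1' alias defined in
# DATABASES, and relations across differently-mapped apps are rejected by
# allow_relation() above.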
| [
"[email protected]"
]
| |
444afd65d83f521bbd49a2443f13fc3fbfceb654 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03078/s480863669.py | ac933d8bc820956754a8b02303270586b6a2aaa3 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # solution
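# Note: a triple built from the (p+1)-th, (q+1)-th and (r+1)-th largest values of a, b
# and c is dominated by (p+1)*(q+1)*(r+1) - 1 other triples, so it can only be among
# the k largest sums when (p+1)*(q+1)*(r+1) <= k -- that is what the break below prunes on.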
x,y,z,k = map(int, input().split())
a = sorted(list(map(int, input().split())), reverse = True)
b = sorted(list(map(int, input().split())), reverse = True)
c = sorted(list(map(int, input().split())), reverse = True)
ans = []
for p in range(min(k,len(a))):
for q in range(min(k,len(b))):
for r in range(min(k,len(c))):
if((p+1)*(q+1)*(r+1) > k):
break
ans.append(a[p] + b[q] + c[r])
ans = sorted(ans, reverse = True)
for i in range(k):
print(ans[i]) | [
"[email protected]"
]
| |
3a074572647edca905c1104c2e82709c859ebddb | 4050f786f3cc505760e25608d66805e3543835f8 | /the_flyer_15147/urls.py | 141a25667c75334ebfabf7887b5c99cfe55f3ff9 | []
| no_license | crowdbotics-apps/the-flyer-15147 | 6fb0a403286d06c5393d9f58b39f76ad5c538312 | e2f62327110f1200c8d4ebf46f127ce4fe903189 | refs/heads/master | 2022-12-11T02:03:31.153849 | 2020-03-28T02:01:50 | 2020-03-28T02:01:50 | 250,693,069 | 0 | 0 | null | 2022-12-08T05:09:49 | 2020-03-28T01:59:48 | Python | UTF-8 | Python | false | false | 2,055 | py | """the_flyer_15147 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("api/v1/", include("event.api.v1.urls")),
path("event/", include("event.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "the flyer"
admin.site.site_title = "the flyer Admin Portal"
admin.site.index_title = "the flyer Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="the flyer API",
default_version="v1",
description="API documentation for the flyer App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
]
| |
664fa09f48389314c2a900ac4b98a88fce679aba | 84a617d20424c0e06847ee5230c579d99e85956c | /pytorch/re/pure/entity_models.py | 36fb6f121c714294e0fbd9672ed9362eec3ddb85 | []
| no_license | ienoob/neo_nlp_project | a6b1fce5ba3413e3d8dba174ffba8b8700d03444 | 1009dc9780b8827813ee3e575e8bfcb03aa5d388 | refs/heads/master | 2023-02-22T20:09:06.366958 | 2022-07-14T02:45:12 | 2022-07-14T02:45:12 | 249,361,823 | 8 | 0 | null | 2023-02-16T03:31:49 | 2020-03-23T07:22:13 | Python | UTF-8 | Python | false | false | 12,864 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) ***
import torch
import torch.nn as nn
from transformers import BertTokenizer, BertPreTrainedModel, BertModel
from transformers import AlbertTokenizer, AlbertPreTrainedModel, AlbertModel
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
# FeedForward and batched_index_select are used below but were missing from the
# imports; the AllenNLP helpers of the same name match their usage here (assumed
# dependency).
from allennlp.modules import FeedForward
from allennlp.nn.util import batched_index_select
import logging
logger = logging.getLogger('root')
class BertForEntity(BertPreTrainedModel):
def __init__(self, config, num_ner_labels, head_hidden_dim=150, width_embedding_dim=150, max_span_length=8):
super().__init__(config)
self.bert = BertModel(config)
self.hidden_dropout = nn.Dropout(config.hidden_dropout_prob)
self.width_embedding = nn.Embedding(max_span_length + 1, width_embedding_dim)
self.ner_classifier = nn.Sequential(
FeedForward(input_dim=config.hidden_size * 2 + width_embedding_dim,
num_layers=2,
hidden_dims=head_hidden_dim,
activations=F.relu,
dropout=0.2),
nn.Linear(head_hidden_dim, num_ner_labels)
)
self.init_weights()
def _get_span_embeddings(self, input_ids, spans, token_type_ids=None, attention_mask=None):
sequence_output, pooled_output = self.bert(input_ids=input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask)
sequence_output = self.hidden_dropout(sequence_output)
"""
        spans: [batch_size, num_spans, 3]; 0: left_end, 1: right_end, 2: width
spans_mask: (batch_size, num_spans, )
"""
spans_start = spans[:, :, 0].view(spans.size(0), -1)
spans_start_embedding = batched_index_select(sequence_output, spans_start)
spans_end = spans[:, :, 1].view(spans.size(0), -1)
spans_end_embedding = batched_index_select(sequence_output, spans_end)
spans_width = spans[:, :, 2].view(spans.size(0), -1)
spans_width_embedding = self.width_embedding(spans_width)
# Concatenate embeddings of left/right points and the width embedding
spans_embedding = torch.cat((spans_start_embedding, spans_end_embedding, spans_width_embedding), dim=-1)
"""
spans_embedding: (batch_size, num_spans, hidden_size*2+embedding_dim)
"""
return spans_embedding
def forward(self, input_ids, spans, spans_mask, spans_ner_label=None, token_type_ids=None, attention_mask=None):
spans_embedding = self._get_span_embeddings(input_ids, spans, token_type_ids=token_type_ids,
attention_mask=attention_mask)
ffnn_hidden = []
hidden = spans_embedding
for layer in self.ner_classifier:
hidden = layer(hidden)
ffnn_hidden.append(hidden)
logits = ffnn_hidden[-1]
if spans_ner_label is not None:
loss_fct = CrossEntropyLoss(reduction='sum')
if attention_mask is not None:
active_loss = spans_mask.view(-1) == 1
active_logits = logits.view(-1, logits.shape[-1])
active_labels = torch.where(
active_loss, spans_ner_label.view(-1), torch.tensor(loss_fct.ignore_index).type_as(spans_ner_label)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, logits.shape[-1]), spans_ner_label.view(-1))
return loss, logits, spans_embedding
else:
return logits, spans_embedding, spans_embedding
class EntityModel():
def __init__(self, args, num_ner_labels):
super().__init__()
bert_model_name = args.model
vocab_name = bert_model_name
if args.bert_model_dir is not None:
bert_model_name = str(args.bert_model_dir) + '/'
# vocab_name = bert_model_name + 'vocab.txt'
vocab_name = bert_model_name
logger.info('Loading BERT model from {}'.format(bert_model_name))
if args.use_albert:
self.tokenizer = AlbertTokenizer.from_pretrained(vocab_name)
self.bert_model = AlbertForEntity.from_pretrained(bert_model_name, num_ner_labels=num_ner_labels,
max_span_length=args.max_span_length)
else:
self.tokenizer = BertTokenizer.from_pretrained(vocab_name)
self.bert_model = BertForEntity.from_pretrained(bert_model_name, num_ner_labels=num_ner_labels,
max_span_length=args.max_span_length)
self._model_device = 'cpu'
self.move_model_to_cuda()
def move_model_to_cuda(self):
if not torch.cuda.is_available():
logger.error('No CUDA found!')
exit(-1)
logger.info('Moving to CUDA...')
self._model_device = 'cuda'
self.bert_model.cuda()
logger.info('# GPUs = %d' % (torch.cuda.device_count()))
if torch.cuda.device_count() > 1:
self.bert_model = torch.nn.DataParallel(self.bert_model)
def _get_input_tensors(self, tokens, spans, spans_ner_label):
start2idx = []
end2idx = []
bert_tokens = []
bert_tokens.append(self.tokenizer.cls_token)
for token in tokens:
start2idx.append(len(bert_tokens))
sub_tokens = self.tokenizer.tokenize(token)
bert_tokens += sub_tokens
end2idx.append(len(bert_tokens) - 1)
bert_tokens.append(self.tokenizer.sep_token)
indexed_tokens = self.tokenizer.convert_tokens_to_ids(bert_tokens)
tokens_tensor = torch.tensor([indexed_tokens])
bert_spans = [[start2idx[span[0]], end2idx[span[1]], span[2]] for span in spans]
bert_spans_tensor = torch.tensor([bert_spans])
spans_ner_label_tensor = torch.tensor([spans_ner_label])
return tokens_tensor, bert_spans_tensor, spans_ner_label_tensor
def _get_input_tensors_batch(self, samples_list, training=True):
tokens_tensor_list = []
bert_spans_tensor_list = []
spans_ner_label_tensor_list = []
sentence_length = []
max_tokens = 0
max_spans = 0
for sample in samples_list:
tokens = sample['tokens']
spans = sample['spans']
spans_ner_label = sample['spans_label']
tokens_tensor, bert_spans_tensor, spans_ner_label_tensor = self._get_input_tensors(tokens, spans,
spans_ner_label)
tokens_tensor_list.append(tokens_tensor)
bert_spans_tensor_list.append(bert_spans_tensor)
spans_ner_label_tensor_list.append(spans_ner_label_tensor)
assert (bert_spans_tensor.shape[1] == spans_ner_label_tensor.shape[1])
if (tokens_tensor.shape[1] > max_tokens):
max_tokens = tokens_tensor.shape[1]
if (bert_spans_tensor.shape[1] > max_spans):
max_spans = bert_spans_tensor.shape[1]
sentence_length.append(sample['sent_length'])
sentence_length = torch.Tensor(sentence_length)
# apply padding and concatenate tensors
final_tokens_tensor = None
final_attention_mask = None
final_bert_spans_tensor = None
final_spans_ner_label_tensor = None
final_spans_mask_tensor = None
for tokens_tensor, bert_spans_tensor, spans_ner_label_tensor in zip(tokens_tensor_list, bert_spans_tensor_list,
spans_ner_label_tensor_list):
# padding for tokens
num_tokens = tokens_tensor.shape[1]
tokens_pad_length = max_tokens - num_tokens
attention_tensor = torch.full([1, num_tokens], 1, dtype=torch.long)
if tokens_pad_length > 0:
pad = torch.full([1, tokens_pad_length], self.tokenizer.pad_token_id, dtype=torch.long)
tokens_tensor = torch.cat((tokens_tensor, pad), dim=1)
attention_pad = torch.full([1, tokens_pad_length], 0, dtype=torch.long)
attention_tensor = torch.cat((attention_tensor, attention_pad), dim=1)
# padding for spans
num_spans = bert_spans_tensor.shape[1]
spans_pad_length = max_spans - num_spans
spans_mask_tensor = torch.full([1, num_spans], 1, dtype=torch.long)
if spans_pad_length > 0:
pad = torch.full([1, spans_pad_length, bert_spans_tensor.shape[2]], 0, dtype=torch.long)
bert_spans_tensor = torch.cat((bert_spans_tensor, pad), dim=1)
mask_pad = torch.full([1, spans_pad_length], 0, dtype=torch.long)
spans_mask_tensor = torch.cat((spans_mask_tensor, mask_pad), dim=1)
spans_ner_label_tensor = torch.cat((spans_ner_label_tensor, mask_pad), dim=1)
# update final outputs
if final_tokens_tensor is None:
final_tokens_tensor = tokens_tensor
final_attention_mask = attention_tensor
final_bert_spans_tensor = bert_spans_tensor
final_spans_ner_label_tensor = spans_ner_label_tensor
final_spans_mask_tensor = spans_mask_tensor
else:
final_tokens_tensor = torch.cat((final_tokens_tensor, tokens_tensor), dim=0)
final_attention_mask = torch.cat((final_attention_mask, attention_tensor), dim=0)
final_bert_spans_tensor = torch.cat((final_bert_spans_tensor, bert_spans_tensor), dim=0)
final_spans_ner_label_tensor = torch.cat((final_spans_ner_label_tensor, spans_ner_label_tensor), dim=0)
final_spans_mask_tensor = torch.cat((final_spans_mask_tensor, spans_mask_tensor), dim=0)
# logger.info(final_tokens_tensor)
# logger.info(final_attention_mask)
# logger.info(final_bert_spans_tensor)
# logger.info(final_bert_spans_tensor.shape)
# logger.info(final_spans_mask_tensor.shape)
# logger.info(final_spans_ner_label_tensor.shape)
return final_tokens_tensor, final_attention_mask, final_bert_spans_tensor, final_spans_mask_tensor, final_spans_ner_label_tensor, sentence_length
def run_batch(self, samples_list, try_cuda=True, training=True):
# convert samples to input tensors
tokens_tensor, attention_mask_tensor, bert_spans_tensor, spans_mask_tensor, spans_ner_label_tensor, sentence_length = self._get_input_tensors_batch(
samples_list, training)
output_dict = {
'ner_loss': 0,
}
if training:
self.bert_model.train()
ner_loss, ner_logits, spans_embedding = self.bert_model(
input_ids=tokens_tensor.to(self._model_device),
spans=bert_spans_tensor.to(self._model_device),
spans_mask=spans_mask_tensor.to(self._model_device),
spans_ner_label=spans_ner_label_tensor.to(self._model_device),
attention_mask=attention_mask_tensor.to(self._model_device),
)
output_dict['ner_loss'] = ner_loss.sum()
output_dict['ner_llh'] = F.log_softmax(ner_logits, dim=-1)
else:
self.bert_model.eval()
with torch.no_grad():
ner_logits, spans_embedding, last_hidden = self.bert_model(
input_ids=tokens_tensor.to(self._model_device),
spans=bert_spans_tensor.to(self._model_device),
spans_mask=spans_mask_tensor.to(self._model_device),
spans_ner_label=None,
attention_mask=attention_mask_tensor.to(self._model_device),
)
_, predicted_label = ner_logits.max(2)
predicted_label = predicted_label.cpu().numpy()
last_hidden = last_hidden.cpu().numpy()
predicted = []
pred_prob = []
hidden = []
for i, sample in enumerate(samples_list):
ner = []
prob = []
lh = []
for j in range(len(sample['spans'])):
ner.append(predicted_label[i][j])
# prob.append(F.softmax(ner_logits[i][j], dim=-1).cpu().numpy())
prob.append(ner_logits[i][j].cpu().numpy())
lh.append(last_hidden[i][j])
predicted.append(ner)
pred_prob.append(prob)
hidden.append(lh)
output_dict['pred_ner'] = predicted
output_dict['ner_probs'] = pred_prob
output_dict['ner_last_hidden'] = hidden
return output_dict
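# A minimal sketch of the sample format run_batch() expects (the keys come from
# _get_input_tensors_batch() above; the values below are illustrative only).  'spans'
# holds word-level [start, end, width] triples and 'spans_label' one label id per span:
#
#     sample = {
#         'tokens': ['Barack', 'Obama', 'visited', 'Paris'],
#         'spans': [[0, 1, 2], [3, 3, 1]],
#         'spans_label': [1, 2],
#         'sent_length': 4,
#     }
#     output = model.run_batch([sample], training=False)
#     predictions = output['pred_ner']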
| [
"[email protected]"
]
| |
1e445cc5bd290315f961eb98d248e02c72584909 | f4653b4bd7528150a53c8f454658c00d7ea0b836 | /cbm/ipycbm/ipy_view/view_main.py | e19d0de98e84ac96843c63b82e5adf468f855f50 | [
"BSD-3-Clause"
]
| permissive | mokasini/cbm | ccb09cb8ab96e6b06b0e13d86ff51124538706f6 | 33bd9c8a0d107f6cdc3343953ae9f7c9bd9272cd | refs/heads/main | 2023-02-24T04:44:07.744715 | 2021-02-01T12:29:38 | 2021-02-01T12:29:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import Tab
from cbm.ipycbm.utils import help_docs
from cbm.ipycbm.ipy_view import view_settings, view_panel
def view_widget_box():
try:
tab_box = Tab(children=[view_panel.view(), help_docs.widget_box(),
view_settings.widget_box()])
tab_box.set_title(0, 'View Data')
tab_box.set_title(1, 'Help')
tab_box.set_title(2, 'Settings')
except Exception as err:
tab_box = Tab(children=[help_docs.widget_box(),
view_settings.widget_box()])
        tab_box.set_title(0, 'Help')
        tab_box.set_title(1, 'Settings')
print("Could not show 'View panel'.", err)
return tab_box
| [
"[email protected]"
]
| |
27c35318c6b5f8212dd449e282c2b081d6dc4c61 | 046c1141399890afa13fd243e55da3dbf31085c5 | /test/test22.py | 05c20e2d7892acce138d4df0ab6d184be9b7d49e | []
| no_license | carusyte/tflab | 1d0edf87282352aeb5a38b83c58ab9c0189bbb1a | 2324c3b0ad22d28c50a4fd8db56e36a2836735c3 | refs/heads/master | 2021-05-12T06:58:26.270868 | 2019-03-24T14:57:44 | 2019-03-24T14:57:44 | 117,232,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,165 | py | from __future__ import print_function
# Path hack.
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
import tensorflow as tf
from pstk.model import model11
from time import strftime
from pstk.data import data as data0
from pstk.data import data12
from test import collect_summary
import os
import numpy as np
import math
EPOCH_SIZE = 444
RNN_LAYERS = 1
FCN_LAYERS = 3
LAYER_WIDTH = 256
MAX_STEP = 50
TIME_SHIFT = 9
DROP_OUT = math.e / 10.0
LEARNING_RATE = 1e-3
LOG_DIR = 'logdir'
def run():
tf.logging.set_verbosity(tf.logging.INFO)
loader = data12.DataLoader(TIME_SHIFT)
print('{} loading test data...'.format(strftime("%H:%M:%S")))
tuuids, tdata, tlabels, tseqlen = loader.loadTestSet(MAX_STEP)
print(tdata.shape)
print(tlabels.shape)
featSize = tdata.shape[2]
nclass = tlabels.shape[1]
classes = [i-nclass//2 for i in range(nclass)]
data = tf.placeholder(tf.float32, [None, MAX_STEP, featSize], "input")
target = tf.placeholder(tf.float32, [None, nclass], "labels")
seqlen = tf.placeholder(tf.int32, [None], "seqlen")
dropout = tf.placeholder(tf.float32, [], name="dropout")
training = tf.placeholder(tf.bool, [], name="training")
with tf.Session() as sess:
model = model11.DRnnPredictorV6(
data=data,
target=target,
seqlen=seqlen,
classes=classes,
rnn_layers=RNN_LAYERS,
fcn_layers=FCN_LAYERS,
layer_width=LAYER_WIDTH,
dropout=dropout,
training=training,
learning_rate=LEARNING_RATE)
stime = '{}'.format(strftime("%Y-%m-%d %H:%M:%S"))
model_name = model.getName()
f = __file__
fbase = f[f.rfind('/')+1:f.rindex('.py')]
base_dir = '{}/{}_{}/{}'.format(LOG_DIR, fbase,
model_name, strftime("%Y%m%d_%H%M%S"))
print('{} using model: {}'.format(strftime("%H:%M:%S"), model_name))
if tf.gfile.Exists(base_dir):
tf.gfile.DeleteRecursively(base_dir)
tf.gfile.MakeDirs(base_dir)
# Isolate the variables stored behind the scenes by the metric operation
metric_local_vars = tf.get_collection(
tf.GraphKeys.LOCAL_VARIABLES, scope="Precisions") + tf.get_collection(
tf.GraphKeys.LOCAL_VARIABLES, scope="Recalls")
metric_vars_initializer = tf.variables_initializer(
var_list=metric_local_vars)
sess.run(tf.group(tf.global_variables_initializer(),
metric_vars_initializer))
summary, train_writer, test_writer = collect_summary(
sess, model, base_dir)
saver = tf.train.Saver()
bno = 0
for epoch in range(EPOCH_SIZE):
bno = epoch*50
print('{} running on test set...'.format(strftime("%H:%M:%S")))
feeds = {data: tdata, target: tlabels,
seqlen: tseqlen, dropout: 0, training: False}
accuracy, worst, test_summary_str = sess.run(
[model.accuracy, model.worst, summary, model.precisions[1], model.recalls[1], model.f_score], feeds)[:3]
bidx, max_entropy, predict, actual = worst[0], worst[1], worst[2], worst[3]
print('{} Epoch {} test accuracy {:3.3f}% max_entropy {:3.4f} predict {} actual {} uuid {}'.format(
strftime("%H:%M:%S"), epoch, 100. * accuracy, max_entropy, predict, actual, tuuids[bidx]))
data0.save_worst_rec(model_name, stime, "test", epoch,
tuuids[bidx], max_entropy, predict, actual)
summary_str = None
for i in range(50):
sess.run(metric_vars_initializer)
bno = bno+1
print('{} loading training data for batch {}...'.format(
strftime("%H:%M:%S"), bno))
truuids, trdata, labels, trseqlen = loader.loadTrainingData(
bno, MAX_STEP)
print('{} training...'.format(strftime("%H:%M:%S")))
feeds = {data: trdata, target: labels,
seqlen: trseqlen, dropout: DROP_OUT, training: True}
summary_str, worst = sess.run(
[summary, model.worst, model.optimize, model.precisions[1], model.recalls[1], model.f_score], feeds)[:2]
bidx, max_entropy, predict, actual = worst[0], worst[1], worst[2], worst[3]
print('{} bno {} max_entropy {:3.4f} predict {} actual {}'.format(
strftime("%H:%M:%S"), bno, max_entropy, predict, actual))
data0.save_worst_rec(model_name, stime, "train", bno,
truuids[bidx], max_entropy, predict, actual)
train_writer.add_summary(summary_str, bno)
test_writer.add_summary(test_summary_str, bno)
train_writer.flush()
test_writer.flush()
checkpoint_file = os.path.join(base_dir, 'model.ckpt')
saver.save(sess, checkpoint_file, global_step=bno)
sess.run(metric_vars_initializer)
# test last epoch
print('{} running on test set...'.format(strftime("%H:%M:%S")))
feeds = {data: tdata, target: tlabels, seqlen: tseqlen,
dropout: 0, training: False}
accuracy, worst, test_summary_str = sess.run(
[model.accuracy, model.worst, summary, model.precisions[1], model.recalls[1], model.f_score], feeds)[:3]
bidx, max_entropy, predict, actual = worst[0], worst[1], worst[2], worst[3]
print('{} Epoch {} test accuracy {:3.3f}% max_entropy {:3.4f} predict {} actual {}'.format(
strftime("%H:%M:%S"), EPOCH_SIZE, 100. * accuracy, max_entropy, predict, actual))
data0.save_worst_rec(model_name, stime, "test", EPOCH_SIZE,
tuuids[bidx], max_entropy, predict, actual)
train_writer.add_summary(summary_str, bno)
test_writer.add_summary(test_summary_str, bno)
train_writer.flush()
test_writer.flush()
if __name__ == '__main__':
run()
| [
"[email protected]"
]
| |
4ecf47ca7e7b37620817c44064a35600aa63affa | dfc2c18053b8e7576f88e7b2524d7ca3a8f47282 | /ch03/session3/63.py | a3458fac0a02818719ccecbeba2d2a88982ce7e0 | []
| no_license | Xoozi/tchomework | a6eed3bbf697ff12af8d42249ec58a139aed0c4c | 627c98b0b652ef20fd93025a17341bba76fbfce6 | refs/heads/master | 2021-01-23T21:18:15.793703 | 2018-10-21T11:05:55 | 2018-10-21T11:05:55 | 57,583,655 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | #一族三次曲线
#(a) For k = 0 and for nearby positive and negative values of k, plot the graphs of f(x) = x**3 + k*x on a common screen.
#    How does the value of k affect the shape of the graph?
#    When k < 0, the function is decreasing and shows only one root in the plotted window.
#    As k moves toward 0, the graph rotates counterclockwise and starts to bend; near 0 several roots appear.
#    When k > 0, the graph stretches out again.
#(b) Find f'(x). As you know, f'(x) is a quadratic; find its discriminant. For which values of k is the discriminant
#    positive, zero, or negative? For which values of k does f' have two zeros, one zero, or none?
#    Explain how the value of k affects the shape of the graph of f.
#f'(x) = 3*x**2 + k
#discriminant = -4*3*k = -12k
#For k > 0 the discriminant < 0, so f' has no zeros.
#For k < 0 the discriminant > 0, so f' has two zeros.
#For k = 0 the discriminant = 0, so f' has one zero.
#So the value of k determines whether f has local extrema.
# plot() and linspace() were used without imports (the file assumed an interactive
# pylab session); the imports and the final show() are added so it also runs standalone.
from numpy import linspace
from matplotlib.pyplot import plot, show
def f(x, k):
    return x**3 + k*x
def ddd(s, e, a):
    r = 0
    g = 0
    b = 0
    k = -1280
    plot([s, e], [0, 0], '-k')
    x = linspace(s, e, a)
    while k <= 1280:
        y = f(x, k)
        plot(x, y, '#%02X%02X%02X' % (r, g, b))
        r = min(r + 2, 255)  # cap at 255 so '#%02X%02X%02X' stays a valid color string
        k += 20
ddd(-16, 16, 1000)
show()
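# A quick symbolic check of the discriminant argument in the comments above, assuming
# sympy is available (it is not otherwise required by this exercise).
try:
    import sympy as sp
    _x, _k = sp.symbols('x k')
    _fprime = sp.diff(_x**3 + _k*_x, _x)   # 3*x**2 + k
    print(sp.discriminant(_fprime, _x))    # prints -12*k, matching the comment above
except ImportError:
    pass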
| [
"[email protected]"
]
| |
d954433fc734887cf2bed62499ea0205cefd66a3 | 30b97efb2f36f81aa684d16d19e0e2db17f2967d | /기타/2468.py | 05d75b8ff59e165f6298ad243ba4d49c20202b24 | []
| no_license | jmseb3/bakjoon | 0a784a74c6476ef51864e2ada9d2551c7c7979eb | a38db54e851372059b0e45add92e43e556835e62 | refs/heads/main | 2023-08-25T08:43:04.579785 | 2021-10-01T08:40:37 | 2021-10-01T08:40:37 | 362,287,450 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | from collections import deque
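# Approach (appears to be Baekjoon 2468, "Safe Area"): for every water level ck from 0
# up to the highest point, cells with height >= ck stay dry; the BFS below flood-fills
# the dry cells, counts the connected regions, and the answer is the maximum count
# over all levels.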
N = int(input())
maps = []
max_len = 0
for _ in range(N):
temp = list(map(int, input().split()))
max_len = max(max_len, max(temp))
maps.append(temp)
moves = [(-1, 0), (1, 0), (0, 1), (0, -1)]
ans = 0
def bfs(y, x, ck, visited):
q = deque()
q.append((y, x))
visited[y][x] = True
while q:
y, x = q.popleft()
for dy, dx in moves:
ny = y + dy
nx = x + dx
if ny < 0 or nx < 0 or ny >= N or nx >= N:
continue
if maps[ny][nx] >= ck and not visited[ny][nx]:
visited[ny][nx] = True
q.append((ny, nx))
for ck in range(max_len+1):
tmp = 0
visited = [[False]*N for _ in range(N)]
for y in range(N):
for x in range(N):
if maps[y][x] >= ck and not visited[y][x]:
bfs(y, x, ck, visited)
tmp += 1
ans = max(tmp, ans)
print(ans)
| [
"[email protected]"
]
| |
e1ecdb9dddc1bcdc4da805d75772b02eead18e04 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/LIVINGSTON-PM4-MIB.py | aa942223a96f63f216f498a166e8bc9c5381dac9 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 135,200 | py | #
# PySNMP MIB module LIVINGSTON-PM4-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LIVINGSTON-PM4-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:07:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
lucentPM4, = mibBuilder.importSymbols("LIVINGSTON-ROOT-MIB", "lucentPM4")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
sysName, = mibBuilder.importSymbols("SNMPv2-MIB", "sysName")
Counter32, Gauge32, Counter64, IpAddress, ModuleIdentity, Unsigned32, Integer32, TimeTicks, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, NotificationType, NotificationType, MibIdentifier, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Gauge32", "Counter64", "IpAddress", "ModuleIdentity", "Unsigned32", "Integer32", "TimeTicks", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "NotificationType", "NotificationType", "MibIdentifier", "iso")
DisplayString, TextualConvention, PhysAddress = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "PhysAddress")
class PMUnitType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 255))
namedValues = NamedValues(("mrgmodule", 1), ("quadt1", 2), ("trie1", 3), ("modem", 4), ("serialport", 5), ("ether0", 6), ("ether1", 7), ("console", 8), ("acpwrsup", 9), ("fan", 10), ("dcpwrsup", 11), ("allunits", 255))
class PMEquipPRIStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("up", 1), ("down", 2), ("loopback", 3), ("fault", 4))
class PMEquipStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("up", 1), ("down", 2), ("maintenance", 3), ("fault", 4), ("other", 5))
class PMDiagCmdStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("success", 1), ("fail", 2), ("inprogress", 3), ("notsupported", 4), ("aborted", 5), ("other", 6))
class PMDiagTestCntrl(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("normal", 1), ("start", 2), ("stop", 3), ("abort", 4))
class PMAlarmType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("informational", 1), ("warning", 2), ("minor", 3), ("major", 4), ("critical", 5))
lucentPM4Mib = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1))
lucentPM4Traps = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2))
lucentPM4MibRev = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4MibRev.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4MibRev.setDescription('This object specifies the current MIB revision number. Example of the MIB revision can be PM4xxx for PM4 product and PM3xxx for PM3 products etc. Where xxx can be any combination of alpha-numeric characters.')
lucentPM4SWRev = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SWRev.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SWRev.setDescription('This object specifies the ComOS revision number. Example of the ComOS revision can be ComOS4.xx. Where xxx can be any combination of alpha-numeric characters.')
lucentPM4Chassis = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3))
lucentPM4ChasSummary = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(55, 55)).setFixedLength(55)).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ChasSummary.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasSummary.setDescription("This object provides general information about the PortMaster 4 chassis operational status. This object is read-only. The DisplayString represents a summary of all the devices in the chassis as follows: Bytes 1 - 2 '0''0' Byte 3 'U', 'D', 'M', 'F', 'O' Byte 4 space Bytes 5 - 6 '0''1' Byte 7 'U', 'D', 'M', 'F', 'O' Byte 8 space Bytes 9 - 10 '0''2' Byte 11 'U', 'D', 'M', 'F', 'O' Byte 12 space Bytes 13 - 14 '0''3' Byte 15 'U', 'D', 'M', 'F', 'O' Byte 16 space Bytes 17 - 18 '0''4' Byte 19 'U', 'D', 'M', 'F', 'O' Byte 20 space Bytes 21 - 22 '0''5' Byte 23 'U', 'D', 'M', 'F', 'O' Byte 24 space Bytes 25 - 26 '0''6' Byte 27 'U', 'D', 'M', 'F', 'O' Byte 28 space Bytes 29 - 30 '0''7' Byte 31 'U', 'D', 'M', 'F', 'O' Byte 32 space Bytes 33 - 34 '0''8' Byte 35 'U', 'D', 'M', 'F', 'O' Byte 36 space Bytes 37 - 38 '0''9' Byte 39 'U', 'D', 'M', 'F', 'O' Byte 40 space Bytes 41 - 42 '1''0' Byte 43 'U', 'D', 'M', 'F', 'O' Byte 44 space Bytes 45 - 46 '1''1' Byte 47 'U', 'D', 'M', 'F', 'O' Byte 48 space Bytes 49 - 50 '1''2' Byte 51 'U', 'D', 'M', 'F', 'O' Byte 52 space Bytes 53 - 54 '1''3' Byte 55 'U', 'D', 'M', 'F', 'O' Byte 56 space Bytes 57 - 58 '1''4' Byte 59 'U', 'D', 'M', 'F', 'O' Byte 60 space Bytes 61 - 62 '1''5' Byte 63 'U', 'D', 'M', 'F', 'O' Byte 64 space Bytes 65 - 66 'P''1' Byte 67 'U', 'D', 'M', 'F', 'O' Byte 68 space Bytes 69 - 70 'P''2' Byte 71 'U', 'D', 'M', 'F', 'O' Byte 72 space Bytes 73 - 74 'P''3' Byte 75 'U', 'D', 'M', 'F', 'O' Byte 76 space Bytes 77 - 78 'D''1' Byte 79 'U', 'D', 'M', 'F', 'O' Byte 80 space Bytes 81 - 82 'D''2' Byte 83 'U', 'D', 'M', 'F', 'O' Byte 84 space Bytes 85 - 86 'F''1' Byte 87 'U', 'D', 'M', 'F', 'O' Byte 88 space Bytes 89 - 90 'F''2' Byte 91 'U', 'D', 'M', 'F', 'O' Byte 92 space Bytes 93 - 94 'F''3' Byte 95 'U', 'D', 'M', 'F', 'O' Byte 96 space Bytes 97 - 98 'F''4' Byte 99 'U', 'D', 'M', 'F', 'O' Legend '#''#' Represents the board number 'F''#' Represents the Fan # 'P''#' Represents the Power Supply # 'D''#' Represents the DC Power Supply # 'U' Up 'D' Down 'M' Maintenance 'F' Failed 'O' Other unknown state.")
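# A sketch of how a manager-side script might decode the summary string described
# above: it is a sequence of space-separated "<unit id><status letter>" tokens such as
# "00U 01D ... P1U F4O" (illustrative only, not part of pysnmp or this generated module).
#
#     def parse_chassis_summary(summary):
#         status_map = {'U': 'up', 'D': 'down', 'M': 'maintenance',
#                       'F': 'failed', 'O': 'other'}
#         return {token[:2]: status_map.get(token[2:3], 'unknown')
#                 for token in summary.split()}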
lucentPM4ChasCmdTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2), )
if mibBuilder.loadTexts: lucentPM4ChasCmdTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdTable.setDescription('Table describing the commands that can be issued to the agent to perform specific actions to any card, port or device in the system. For example to erase the flash or a particular file from the flash. Note that only a station configured with the appropriate authentication string can issue commands to the agent.')
lucentPM4ChasCmdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4ChasCmdIndex"))
if mibBuilder.loadTexts: lucentPM4ChasCmdEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdEntry.setDescription('Entries in the command table for the chassis commands. This describes one entry or row in the command table.')
lucentPM4ChasCmdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdIndex.setDescription('This object specifies the index in the command table. The values for this object is limited to the size of the command table on the network element. The size of the command table is set to 10 which can be changed if and when users need to schedule more than 10 commands at a given time.')
lucentPM4ChasCmdBoardId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdBoardId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdBoardId.setDescription('This object specifies the board for which the command is to be applied. The values for this object is limited to the Max number of boards.')
lucentPM4ChasCmdUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 3), PMUnitType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdUnitType.setDescription('This object specifies the type of the device to which this command must apply.')
lucentPM4ChasCmdUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdUnitIndex.setDescription('This object specifies the interface index.')
lucentPM4ChasCmdDevId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 96))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdDevId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdDevId.setDescription('This object specifies the sub-unit id for which the command must be applied to. This value will be used by the agent to index to the correct sub-device on a board. For example, this object can have values from 1 - 96 for the modems or 1 - 4 for T1 or 1 - 3 for the E1.')
lucentPM4ChasCmdId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29))).clone(namedValues=NamedValues(("eraseflashfile", 1), ("eraseallflash", 2), ("saveall", 3), ("resetport", 4), ("resetfilter", 5), ("adduser", 6), ("deleteuser", 7), ("addlocation", 8), ("diallocation", 9), ("addfilter", 10), ("deletefilter", 11), ("addmodem", 12), ("resetvirtport", 13), ("addospfarea", 14), ("resetospf", 15), ("addprop", 16), ("deleteprop", 17), ("resetprop", 18), ("resetether0", 19), ("resetether1", 20), ("resetall", 21), ("resetconsole", 22), ("version", 23), ("traceroutes", 24), ("ptrace", 25), ("ifconfig", 26), ("eraseconfig", 27), ("erasecomos", 28), ("reboot", 29)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdId.setDescription('This object specifies the command. Each command takes a unique value. The completion status of this command is set in the result object of the table.')
lucentPM4ChasCmdParams = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdParams.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdParams.setDescription("This object specifies the command parameters. Each parameter must be seperated by a blank space. The last parameter is terminated with a ';'.")
lucentPM4ChasCmdResult = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 8), PMDiagCmdStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ChasCmdResult.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdResult.setDescription('This object specifies the command result. The result for each of the previous 10 commands will be stored in a table, which can be retrieved by the client when needed.')
lucentPM4ConfigMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4))
lucentPM4CmInterfaces = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1))
lucentPM4CmSerial = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1))
lucentPM4SerialTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1), )
if mibBuilder.loadTexts: lucentPM4SerialTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTable.setDescription('A list of serial interface entries.')
lucentPM4SerialEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4SerialBoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4SerialIndex"))
if mibBuilder.loadTexts: lucentPM4SerialEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialEntry.setDescription('A serial interface entry containing objects at the physical and session layer.')
lucentPM4SerialBoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialBoardIndex.setDescription('A unique value for each board in the PortMaster chassis. The Max value of this variable is limited by the number of boards in the chassis. This value is limited to 255.')
lucentPM4SerialUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialUnitType.setDescription('Unit type indicates the serial port. The interface table ifIndex is a combination of board index, unit type and unit index.')
lucentPM4SerialIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialIndex.setDescription('A unique value for each serial interface on a given board.')
lucentPM4ModemId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemId.setDescription('This object is the cross reference to the modem interface table index. The value is dynamically assigned when the call is established.')
lucentPM4SerialPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialPortNumber.setDescription('A serial port to which this modem is assigned for this call.')
lucentPM4SerialPhysType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("async", 2), ("sync", 3), ("isdn", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialPhysType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialPhysType.setDescription('The type of physical serial interface, distinguished according to the physical/link protocol(s) being currently used on the interface. When this object is set to asyn(2), then the service types dial-in, dial- out, login, and device are valid. When this object is set to sync(3), the serial service types dial-in, dial- out and hardwired are valid.')
lucentPM4SerialPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("idle", 1), ("connecting", 2), ("established", 3), ("disconnecting", 4), ("command", 5), ("noservice", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialPortStatus.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialPortStatus.setDescription('The status of the serial interface.')
lucentPM4SerialDS0State = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notavailable", 1), ("busyout", 2), ("havecomport", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialDS0State.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialDS0State.setDescription('Cross reference value for each DS0 for a given T1/E1 line and a given board in the PM4 chassis.')
lucentPM4SerialUser = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialUser.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialUser.setDescription('Name of the active user. Blank if not active.')
lucentPM4SerialSessionId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialSessionId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialSessionId.setDescription('A unique Session Identifier which matches the RADIUS session ID. Blank when not using RADIUS.')
lucentPM4SerialTypeHardwired = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeHardwired.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeHardwired.setDescription('The active type of service being provided by the serial interface.')
lucentPM4SerialTypeNwDialIn = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeNwDialIn.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeNwDialIn.setDescription('The active type of service being provided by the serial interface.')
lucentPM4SerialTypeNwDialout = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeNwDialout.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeNwDialout.setDescription('The active type of service being provided by the serial interface.')
lucentPM4SerialTypeLogin = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeLogin.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeLogin.setDescription('The active type of service being provided by the serial interface.')
lucentPM4SerialTypeDevice = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeDevice.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeDevice.setDescription('The active type of service being provided by the serial interface.')
lucentPM4SerialTypeDeviceName = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 16), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeDeviceName.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeDeviceName.setDescription('Device name if the lucentPM4SerialTypeDevice is enabled. This is a string of characters (e.g. /dev/tty1) indicating the device name.')
lucentPM4SerialDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("in", 1), ("out", 2), ("inout", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialDirection.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialDirection.setDescription('The direction the active session was initiated.')
lucentPM4SerialStarted = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 18), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialStarted.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialStarted.setDescription('The amount of time this session has been active.')
lucentPM4SerialIdle = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 19), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialIdle.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialIdle.setDescription('The amount of time this session has been idle.')
lucentPM4SerialInSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 20), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialInSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialInSpeed.setDescription("An estimate of the serial interface's current inbound bandwidth in bits per second.")
lucentPM4SerialOutSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 21), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialOutSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialOutSpeed.setDescription("An estimate of the serial interface's current outbound bandwidth in bits per second.")
lucentPM4SerialIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 22), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialIpAddress.setDescription("The IP address associated with the serial interface. If being used as a network type port, this is the remote user's IP address. If being used as a device or login, this is the IP address of the host the user is connected to.")
lucentPM4SerialifDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 23), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialifDescr.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialifDescr.setDescription('A textual string containing information about the network interface bound to the serial interface.')
lucentPM4SerialInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialInOctets.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialInOctets.setDescription('The total number of octets received on the serial interface.')
lucentPM4SerialOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialOutOctets.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialOutOctets.setDescription('The total number of octets transmitted on the serial interface.')
lucentPM4SerialQOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialQOctets.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialQOctets.setDescription('The total number of octets queued on the serial interface.')
lucentPM4CmT1E1 = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2))
lucentPM4T1E1Table = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1), )
if mibBuilder.loadTexts: lucentPM4T1E1Table.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Table.setDescription('A list of T1/E1 interface entries.')
lucentPM4T1E1Entry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1BoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1Index"))
if mibBuilder.loadTexts: lucentPM4T1E1Entry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Entry.setDescription('A T1/E1 entry containing objects at the physical layer.')
lucentPM4T1E1BoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1BoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1BoardIndex.setDescription('A unique value for each board in the PM4 chassis.')
lucentPM4T1E1UnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1UnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1UnitType.setDescription('This object specifies the type of the unit as the T1/E1 line. This value is a part of the interface table ifIndex.')
lucentPM4T1E1Index = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1Index.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Index.setDescription('A unique value for each T1/E1 interface.')
lucentPM4T1E1SerialIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1SerialIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1SerialIndex.setDescription('The value of the instance for the serial port. This object provides a cross reference from the T1/E1 interface to the serial port to which it is mapped.')
lucentPM4T1E1SerialCount = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1SerialCount.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1SerialCount.setDescription('The number of serial ports assigned to this interface. If this is a Channelized T1/E1, then the count is 24/32. If this is a fractional T1/E1, then the count can be any number between 1 and a number less than 24/32.')
lucentPM4T1E1PhysType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("t1", 1), ("e1", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PhysType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PhysType.setDescription('The type of interface (T1 or E1).')
lucentPM4T1E1Status = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 7), PMEquipPRIStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1Status.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Status.setDescription('The current operational status of the interface.')
lucentPM4T1E1Function = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("isdn", 1), ("channelized", 2), ("clear", 3), ("fractional", 4), ("isdnfrac", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1Function.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Function.setDescription('The configured function of the interface.')
lucentPM4T1E1Framing = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("esf", 1), ("d4", 2), ("crc4", 3), ("fas", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1Framing.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Framing.setDescription('The configured line framing.')
lucentPM4T1E1Encoding = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ami", 1), ("b8zs", 2), ("hdb3", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1Encoding.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Encoding.setDescription('The configured line signal encoding.')
lucentPM4T1E1PCM = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ulaw", 1), ("alaw", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PCM.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PCM.setDescription('The configured voice modulation.')
lucentPM4T1E1SuperSignal = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("em", 1), ("groundstart", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4T1E1SuperSignal.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1SuperSignal.setDescription('The configured supervisory signalling mode for this interface.')
lucentPM4T1E1StartMode = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("wink", 1), ("immediate", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4T1E1StartMode.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1StartMode.setDescription('The configured start mode for this interface.')
lucentPM4T1E1ChangeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 14), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1ChangeTime.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1ChangeTime.setDescription('The amount of time since the last status change.')
lucentPM4T1E1RecvLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1RecvLevel.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1RecvLevel.setDescription("An estimate of the serial interface's current receive signal level in dB.")
lucentPM4T1E1BlueAlarms = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1BlueAlarms.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1BlueAlarms.setDescription('The total number of Blue Alarms on the interface.')
lucentPM4T1E1YellowAlarms = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1YellowAlarms.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1YellowAlarms.setDescription('The total number of Yellow Alarms on the interface.')
lucentPM4T1E1CarrierLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1CarrierLoss.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1CarrierLoss.setDescription('The total number of times the interface has lost carrier.')
lucentPM4T1E1SyncLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1SyncLoss.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1SyncLoss.setDescription('The total number of times the interface has lost frame synchronization.')
lucentPM4T1E1BipolarErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1BipolarErrors.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1BipolarErrors.setDescription('The total number of bipolar violations detected on the interface.')
lucentPM4T1E1CRCErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1CRCErrors.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1CRCErrors.setDescription('The total number of frame level CRC errors detected on the interface.')
lucentPM4T1E1SyncErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1SyncErrors.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1SyncErrors.setDescription('The total number of frame synchronization errors detected on the interface.')
lucentPM4CmModem = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3))
lucentPM4ModemTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1), )
if mibBuilder.loadTexts: lucentPM4ModemTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemTable.setDescription('A list of modem entries.')
lucentPM4ModemEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4ModemBoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4ModemUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4ModemIndex"))
if mibBuilder.loadTexts: lucentPM4ModemEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemEntry.setDescription('A modem entry containing objects at the session layer.')
lucentPM4ModemBoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemBoardIndex.setDescription('A unique value for each board in the PM4 chassis.')
lucentPM4ModemUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemUnitType.setDescription('Unit type specifies the type of device or interface.')
lucentPM4ModemIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemIndex.setDescription('A unique value for each modem interface. The value of this object can be 1 - 96 for a Quad T1, 1 - 94 for a Tri E1 card.')
lucentPM4ModemPortName = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemPortName.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemPortName.setDescription('A textual string containing the name of the serial interface (ie. S0, S1, etc).')
lucentPM4ModemStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("none", 1), ("bound", 2), ("connecting", 3), ("active", 4), ("test", 5), ("down", 6), ("ready", 7), ("halt", 8), ("admin", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemStatus.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemStatus.setDescription('The current state of the modem.')
lucentPM4ModemProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("none", 1), ("lapm", 2), ("mnp", 3), ("bufferd", 4), ("direct", 5), ("cellular", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemProtocol.setDescription('The error correcting protocol being used in the modem.')
lucentPM4ModemCompression = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("v42bis", 2), ("mnp5", 3), ("stac", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemCompression.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemCompression.setDescription('The compression being used in the modem interface.')
lucentPM4ModemInSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemInSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemInSpeed.setDescription("An estimate of the modem interface's current inbound bandwidth in bits per second.")
lucentPM4ModemOutSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemOutSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemOutSpeed.setDescription("An estimate of the modem interface's current outbound bandwidth in bits per second.")
lucentPM4ModemInByteCount = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemInByteCount.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemInByteCount.setDescription('The total number of bytes received on the serial interface.')
lucentPM4ModemOutByteCount = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemOutByteCount.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemOutByteCount.setDescription('The total number of bytes transmitted on the serial interface.')
lucentPM4ModemRetrains = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemRetrains.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemRetrains.setDescription('The number of retrains attempted by the modem.')
lucentPM4ModemRenegotiates = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemRenegotiates.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemRenegotiates.setDescription('The number of renegotiates attempted by the modem.')
lucentPM4ModemCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemCalls.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemCalls.setDescription('The number of calls received by the modem.')
lucentPM4ModemDetects = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemDetects.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemDetects.setDescription('The number of analog calls received by the modem.')
lucentPM4ModemConnects = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemConnects.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemConnects.setDescription('The number of successful calls received by the modem.')
lucentPM4CmEther = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4))
lucentPM4EtherTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1), )
if mibBuilder.loadTexts: lucentPM4EtherTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherTable.setDescription('A list of ethernet interface entries. This object is not accessible.')
lucentPM4EtherEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4EtherBoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4EtherIfType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4EtherIndex"))
if mibBuilder.loadTexts: lucentPM4EtherEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherEntry.setDescription('Ethernet interface entry containing objects at the Session/Physical layers.')
lucentPM4EtherBoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4EtherBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherBoardIndex.setDescription('A unique value for each ethernet board. The manager card has two ethernet interfaces at present. The ethernet interface in slot 4 has a board ID 10 and if there is a manager card in slot 5, the board ID for the interface would be 11.')
lucentPM4EtherIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4EtherIfType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherIfType.setDescription('The interface type which together with the board ID and the interface number will uniquely identify the interface.')
lucentPM4EtherIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ether0", 1), ("ether1", 2), ("other", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4EtherIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherIndex.setDescription('A unique value for each ethernet interface. The manager card has two ethernet interfaces at present. ether0(1) represents 10 Base-T interface and ether1(2) specifies the 10/100 Base-T auto-sensing ethernet interface.')
lucentPM4EtherIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(67436545, 168099841))).clone(namedValues=NamedValues(("ether0", 67436545), ("ether1", 168099841)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4EtherIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherIfIndex.setDescription('IfIndex cross reference value for the ethernet interfaces. The manager card has two ethernet interfaces at present. ether0(67436545) represents 10 Base-T interface which corresponds to board 4 and interface 1. The enumerated value ether1(168099841) specifies the 10/100 Base-T auto-sensing ethernet interface which corresponds to board 4 and interface 2. We can add the standby manager card ethernet interfaces when they are available.')
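# --- Illustrative helper (not part of the generated MIB module) ---
# The DESCRIPTION above states that the composite ifIndex packs the board ID
# into the most significant byte, the unit type into the next byte, and the
# interface number into the low two bytes (e.g. ether0 = 67436545 = 0x04050001).
# The sketch below decodes such a value under that assumption; the function
# name is our own and is not defined by the MIB.
def _decodeCompositeIfIndex(ifIndex):
    """Split a composite ifIndex into (boardId, unitType, interfaceNum)."""
    boardId = (ifIndex >> 24) & 0xFF
    unitType = (ifIndex >> 16) & 0xFF
    interfaceNum = ifIndex & 0xFFFF
    return boardId, unitType, interfaceNum
# Example: _decodeCompositeIfIndex(67436545) == (4, 5, 1)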
lucentPM4EtherPortName = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherPortName.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherPortName.setDescription('A printable ASCII string specifying the name of the ethernet port.')
lucentPM4EtherMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 6), PhysAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4EtherMacAddress.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherMacAddress.setDescription('Physical address of the interface.')
lucentPM4EtherIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherIpAddress.setDescription('IP address of the interface.')
lucentPM4EtherIpGateway = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherIpGateway.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherIpGateway.setDescription('IP address of the gateway machine.')
lucentPM4EtherPriNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 9), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherPriNameServer.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherPriNameServer.setDescription('IP address of the primary name server.')
lucentPM4EtherAltNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 10), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherAltNameServer.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherAltNameServer.setDescription('IP address of the alternate name server.')
lucentPM4EtherSubnetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 11), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherSubnetMask.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherSubnetMask.setDescription('Subnet mask of the interface. Used to partition the network into different branches.')
lucentPM4EtherInFilter = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherInFilter.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherInFilter.setDescription('IP Input packet filter. Used to control the type of IP packets reaching the interface.')
lucentPM4EtherOutFilter = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOutFilter.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOutFilter.setDescription('IP output packet filter. Used to control the type of packets sent out of the interface.')
lucentPM4EtherOptRip = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptRip.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptRip.setDescription('The RIP protocol enable/disable option.')
lucentPM4EtherOptSlip = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptSlip.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptSlip.setDescription('The SLIP protocol enable/disable option.')
lucentPM4EtherOptEtherDown = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptEtherDown.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptEtherDown.setDescription('Ethernet interface down enable/disable option.')
lucentPM4EtherOptBcastHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptBcastHigh.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptBcastHigh.setDescription('Use high (all-ones) broadcast address enable/disable option.')
lucentPM4EtherOptSnmp = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptSnmp.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptSnmp.setDescription('Default SNMP protocol enable/disable option.')
lucentPM4EtherOptNoListen = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptNoListen.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptNoListen.setDescription('Do not listen to RIP on the ether interface.')
lucentPM4EtherOptDefaultRip = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptDefaultRip.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptDefaultRip.setDescription('Default RIP protocol enable/disable option.')
lucentPM4EtherOptDefaultListen = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptDefaultListen.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptDefaultListen.setDescription('Default listen enable/disable option.')
lucentPM4EtherOptIPFilter = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptIPFilter.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptIPFilter.setDescription('IP filter enable/disable option.')
lucentPM4EtherOptDns = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptDns.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptDns.setDescription('DNS enable/disable option.')
lucentPM4EtherOptPmeMsg = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptPmeMsg.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptPmeMsg.setDescription('PME Msg. enable/disable option (not further documented).')
lucentPM4EtherOptNoClip = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptNoClip.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptNoClip.setDescription('No Clip enable/disable option (not further documented).')
lucentPM4EtherOptEtherIpx = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptEtherIpx.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptEtherIpx.setDescription('Ether IPX enable/disable option.')
lucentPM4EtherOptNetBIOS = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptNetBIOS.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptNetBIOS.setDescription('Net BIOS enable/disable option.')
lucentPM4EtherOptAccounting = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptAccounting.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptAccounting.setDescription('Accounting enable/disable option.')
lucentPM4EtherOptNoPAP = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptNoPAP.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptNoPAP.setDescription('PAP enable/disable option.')
lucentPM4FaultMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5))
lucentPM4FaultMgmtIsolation = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1))
lucentPM4FaultMgmtChasTrap = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1), )
if mibBuilder.loadTexts: lucentPM4FaultMgmtChasTrap.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FaultMgmtChasTrap.setDescription('Chassis Trap table which indicates one of several Traps. The chassis trap table stores the previous history of the traps which can be retrieved by the management stations at a later time. This object is not-accessible and present for MIB clarity.')
lucentPM4FMChasTrapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4FMChasTrapIndex"))
if mibBuilder.loadTexts: lucentPM4FMChasTrapEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapEntry.setDescription('Entry in the chassis Trap table. Each trap is uniquely identified by an index. This object is not accessible and present for MIB clarity.')
lucentPM4FMChasTrapIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapIndex.setDescription('Index into the Trap table on the agent. This table stores the previous 500 traps which can be retrieved at any given time.')
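# --- Illustrative usage sketch (not part of the generated MIB module) ---
# A manager can retrieve the stored trap history by walking the chassis trap
# table defined above. The sketch assumes the synchronous hlapi of pysnmp 4.x;
# the host address and community string are placeholders.
def _walkChassisTrapTable(host, community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    # OID of lucentPM4FMChasTrapEntry defined above.
    entryOid = '1.3.6.1.4.1.307.1.1.2.1.5.1.1.1'
    for errInd, errStat, errIdx, varBinds in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((host, 161)), ContextData(),
            ObjectType(ObjectIdentity(entryOid)),
            lexicographicMode=False):
        if errInd or errStat:
            break
        for varBind in varBinds:
            print(varBind)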
lucentPM4FMChasTrapBoardID = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapBoardID.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapBoardID.setDescription('Board ID is the board number for which this trap is stored. If the trap is for an auxiliary device such as a power supply or fan, this value is set to zero.')
lucentPM4FMChasTrapUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 3), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapUnitType.setDescription('Uniquely specifies the unit type for this trap. The unit can be a board or any other device in the chassis such as a fan or a power supply.')
lucentPM4FMChasTrapUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapUnitIndex.setDescription('Uniquely specifies the unit index. The unit index is the same as the ifIndex for T1/E1 interfaces, the modemIndex for modems, or the fan or power supply index for fans and power supplies.')
lucentPM4FMChasTrapStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("other", 1), ("online", 2), ("offline", 3), ("maintenance", 4), ("fault", 5), ("notinstalled", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapStatus.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapStatus.setDescription('Trap status specifies whether the associated object in the Trap is online(2), offline(3), maintenance(4) or fault(5).')
lucentPM4FMChasTrapSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 6), PMAlarmType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapSeverity.setDescription('Trap severity specifies the severity of the Trap for the associated object.')
lucentPM4FMChasTrapTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 7), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapTimeStamp.setDescription('This object stores the timestamp of this trap.')
lucentPM4FMChasTrapState = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("trapsent", 1), ("ackdue", 2), ("acked", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMChasTrapState.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapState.setDescription('This object stores the Trap state of this trap.')
lucentPM4FMBoardIndex = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 2), Integer32())
if mibBuilder.loadTexts: lucentPM4FMBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMBoardIndex.setDescription('Board index uniquely specifies the board in the chassis. This object is set to zero for power supplies, fans and other auxiliary devices. This object is not accessible through Get, Get-Next or Set PDUs. It is sent out as part of the Trap.')
lucentPM4FMUnitIndex = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 3), Integer32())
if mibBuilder.loadTexts: lucentPM4FMUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMUnitIndex.setDescription('Unit index uniquely specifies the T1/E1 line, or modem or any device (logical/physical) in the chassis. This object is not accessible through Get, Get-Next or Set PDUs. It is sent out as part of the Trap.')
lucentPM4FMUnitType = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 4), PMUnitType())
if mibBuilder.loadTexts: lucentPM4FMUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMUnitType.setDescription('Unit type specifies the T1/E1 line, or modem or any device in the chassis. This object is not accessible through Get, Get-Next or Set PDUs. It is sent out as part of the Trap.')
lucentPM4FMUnitTrapStatus = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28))).clone(namedValues=NamedValues(("other", 1), ("offline", 2), ("online", 3), ("failed", 4), ("restored", 5), ("pwrwarn", 6), ("tempwarn", 7), ("temphot", 8), ("dtrloss", 9), ("carrierloss", 10), ("renegotiation", 11), ("los", 12), ("ais", 13), ("redalarm", 14), ("yellowalarm", 15), ("cv", 16), ("crc", 17), ("bpv", 18), ("fer", 19), ("pll", 20), ("es", 21), ("ses", 22), ("sefs", 23), ("uas", 24), ("dm", 25), ("les", 26), ("css", 27), ("bes", 28))))
if mibBuilder.loadTexts: lucentPM4FMUnitTrapStatus.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMUnitTrapStatus.setDescription('Trap status specifies the associated object in the Trap. This object is not accessible other than when produced as the result of a trap.')
lucentPM4FMUnitTrapSeverity = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 6), PMAlarmType())
if mibBuilder.loadTexts: lucentPM4FMUnitTrapSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMUnitTrapSeverity.setDescription('Trap severity specifies the severity of the Trap for the associated object. This object is not accessible except when produced as the result of a trap.')
lucentPM4FMTrapConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2))
lucentPM4FMEqpTrapCfg = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1), )
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCfg.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCfg.setDescription('Equipment Trap configuration table used to configure Traps. The objects in this table are used to enable or disable traps on a per board/interface/device basis. This object is not-accessible and present for MIB clarity.')
lucentPM4FMEqpTrapCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEqpBoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEqpUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEqpUnitIndex"))
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCfgEntry.setDescription('Entry in the equipment Trap config table. Each trap is uniquely identified by the board ID, Unit type and unit index. For auxiliary devices such as power supplies and fans, the board index will be zero, the unit index identifies the unit and the unit type specifies whether the unit is a fan, a power supply, etc. This object is not accessible and present for MIB clarity.')
lucentPM4FMEqpBoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEqpBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpBoardIndex.setDescription('Board ID to which the Trap configuration needs to apply. The board ID is zero if this trap configuration is for an auxiliary device such as a fan or power supply.')
lucentPM4FMEqpUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEqpUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpUnitType.setDescription('Unit type, along with the board index and unit index, uniquely specifies the device/interface being configured.')
lucentPM4FMEqpUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEqpUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpUnitIndex.setDescription('Unit index refers to the interface or sub-device such as a modem, serial port etc. For devices such as power supplies and fans this object is zero.')
lucentPM4FMEqpTrapId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))).clone(namedValues=NamedValues(("boardoffline", 1), ("boardonline", 2), ("pwrsupfail", 3), ("pwrsuprestored", 4), ("fanfail", 5), ("fanrestored", 6), ("boardtempwarn", 7), ("boardtempnormal", 8), ("boardtoohot", 9), ("modemfail", 10), ("linedown", 11), ("lineup", 12), ("linethresh", 13), ("boardpwrshutdown", 14), ("radiusauthfail", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEqpTrapId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpTrapId.setDescription('Trap ID indicating the trap for which the configuration must apply.')
lucentPM4FMEqpTrapCtl = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCtl.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCtl.setDescription('Trap control which configures the trap off(1) or on(2). When the trap is configured as off(1), the trap is not sent out to the management station. When configured as on(2), the trap is sent to all the management stations configured to receive the trap.')
lucentPM4FMEqpRepTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEqpRepTimer.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpRepTimer.setDescription('If the trap is to be repeated, this object specifies the time in seconds. When this object value is set to 0, it indicates the trap is a non-repeating trap.')
lucentPM4FMT1E1ThreshTrapCfg = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2), )
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapCfg.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapCfg.setDescription('T1/E1 Threshold Trap configuration table to configure the thresholds for various T1/E1 traps. This object is not-accessible and present for MIB clarity.')
lucentPM4FMT1E1ThreshTrapCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4FMThreshBoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMThreshUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMThreshUnitIndex"))
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapCfgEntry.setDescription('Entry in the T1/E1 threshold trap config table. Each trap is uniquely identified by the board index, unit type and unit index which is the T1/E1 interface number. This object is not accessible and present for MIB clarity.')
lucentPM4FMT1E1ThreshBoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshBoardIndex.setDescription('Board ID for which the Trap threshold configuration must apply. It includes boards 1 - 10 and other devices such as power supplies and fans etc.')
lucentPM4FMT1E1ThreshUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshUnitType.setDescription('Unit type for which the Trap threshold configuration must be applied.')
lucentPM4FMT1E1ThreshESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 3), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshESs.setDescription('The threshold for errored seconds on the interface. A trap is issued when this set limit is exceeded. When this threshold is exceeded, the performance of the interface will degrade; a trap is generated to notify the administrator to take appropriate action.')
lucentPM4FMT1E1ThreshSESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 4), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshSESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshSESs.setDescription('The threshold for severely errored seconds on the interface. A trap is issued when this limit is exceeded. A severely errored second is a second with 320 or more path code violation error events, one or more out-of-frame defects, or a detected AIS defect.')
lucentPM4FMT1E1ThreshSEFSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 5), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshSEFSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshSEFSs.setDescription('The threshold for severely errored framing seconds. A trap is issued when this threshold is exceeded. A severely errored framing second is a second with one or more framing defects or a detected AIS defect.')
lucentPM4FMT1E1ThreshUASs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 6), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshUASs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshUASs.setDescription('The threshold for unavailable seconds. A trap is issued when this set threshold is exceeded. Unavailable seconds are calculated by counting the number of seconds that the interface is unavailable, beginning at the onset of 10 contiguous SESs. Once unavailable, the interface becomes available again at the onset of 10 contiguous seconds with no SESs.')
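# --- Illustrative sketch (not part of the generated MIB module) ---
# One reading of the 10-second rule in the DESCRIPTION above, expressed as a
# small state machine over per-second SES flags. The helper name and the exact
# handling of the transition seconds are our own assumptions.
def _countUnavailableSeconds(sesFlags):
    """Count UAS given a sequence of booleans (True = that second was a SES)."""
    flags = list(sesFlags)
    uas = 0
    unavailable = False
    i = 0
    while i < len(flags):
        window = flags[i:i + 10]
        if not unavailable and len(window) == 10 and all(window):
            unavailable = True      # onset of 10 contiguous SESs
            uas += 10
            i += 10
        elif unavailable and len(window) == 10 and not any(window):
            unavailable = False     # onset of 10 contiguous non-SES seconds
            i += 10
        else:
            uas += 1 if unavailable else 0
            i += 1
    return uas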
lucentPM4FMT1E1ThreshCSSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 7), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshCSSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshCSSs.setDescription('The threshold for controlled slip seconds on the interface. A trap is issued when this set threshold is exceeded. A controlled slip second is a one-second interval containing one or more controlled slips.')
lucentPM4FMT1E1ThreshPCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 8), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshPCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshPCVs.setDescription('The threshold for path code violations on the interface. A trap is issued when this set threshold is exceeded. A PCV is a frame synchronization bit error in the D4 and E1-noCRC format interfaces or a CRC error in the ESF (extended super frame) and E1-CRC interface formats.')
lucentPM4FMT1E1ThreshLESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 9), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshLESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshLESs.setDescription('The threshold for line errored seconds on the interface. A trap is sent to the manager when this set threshold is exceeded. A line errored second, according to T1M1.3 is a second in which one or more line code violations were detected.')
lucentPM4FMT1E1ThreshBESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 10), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshBESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshBESs.setDescription('The threshold for bursty errored seconds on the interface. A trap is sent to the manager when this set threshold is exceeded. A bursty errored second is a second with fewer than 320 and more than 1 path code violations.')
lucentPM4FMT1E1ThreshDMs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 11), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshDMs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshDMs.setDescription('The threshold for degraded minutes on the interface. A trap is sent to the manager when this set threshold is exceeded. Degraded minutes are determined by collecting all of the available seconds, after removing any severely errored seconds. The resulting seconds are grouped into 60-second intervals; if the cumulative error rate during the seconds in a group exceeds 1E-6, that minute is counted as a degraded minute.')
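# --- Illustrative sketch (not part of the generated MIB module) ---
# One possible reading of the Degraded Minutes rule above: drop SESs, group the
# remaining seconds into 60-second blocks, and flag a block whose estimated bit
# error rate exceeds 1E-6. The helper name and the nominal T1 line rate of
# 1,544,000 bit/s are our own assumptions, given only as an example.
def _countDegradedMinutes(bitErrorsPerSecond, sesFlags, lineRate=1544000):
    seconds = [e for e, ses in zip(bitErrorsPerSecond, sesFlags) if not ses]
    dms = 0
    for start in range(0, len(seconds) - 59, 60):
        block = seconds[start:start + 60]
        if sum(block) / float(60 * lineRate) > 1e-6:
            dms += 1
    return dms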
lucentPM4FMT1E1ThreshRepTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshRepTimer.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshRepTimer.setDescription('If the trap is to be repeated, this object specifies the time in seconds. When this object value is set to 0, it indicates the trap is a non-repeating trap.')
lucentPM4FMT1E1ThreshTrapAck = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("noack", 2), ("ack", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapAck.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapAck.setDescription('If set to ack(3), clears the trap condition. If the value is set to noack(2), leaves the trap condition unchanged.')
lucentPM4FMEnvTrapCfg = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3), )
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCfg.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCfg.setDescription('Environment Trap configuration table. This table enables configuration of voltage, power levels and temperature ranges for different units in the chassis. This object is not-accessible and present for MIB clarity.')
lucentPM4FMEnvTrapCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEnvBoardID"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEnvUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEnvUnitIndex"))
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCfgEntry.setDescription('Entry in the environment trap config table. Each trap is uniquely identified by the board index, unit type and unit index. This object is not accessible and present for MIB clarity.')
lucentPM4FMEnvBoardID = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEnvBoardID.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvBoardID.setDescription('Board ID specifies the board identifier for this trap. If the trap configuration is for an auxiliary device such as a power supply or fan, this object will be set to zero. The unit type and the unit index will uniquely identify the auxiliary devices.')
lucentPM4FMEnvUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEnvUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvUnitType.setDescription('Unit for which the Trap configuration is to apply.')
lucentPM4FMEnvUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEnvUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvUnitIndex.setDescription('Unit index specifies the interface or sub-unit for this trap (modem or T1/E1 interface etc.). The unit type and the unit index will uniquely identify the auxiliary devices.')
lucentPM4FMEnvOptUnitTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEnvOptUnitTemp.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvOptUnitTemp.setDescription('The optimum temperature for this unit. A trap is generated when the temperature deviates from the specified range. The temperature is specified as an integer in degrees Fahrenheit.')
lucentPM4FMEnvUnitTempRange = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEnvUnitTempRange.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvUnitTempRange.setDescription('The temperature range above which a trap is generated. The temperature must be specified as an integer in degrees Fahrenheit (for example, +/- 5 degrees F).')
lucentPM4FMEnvOptUnitPwrLvl = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEnvOptUnitPwrLvl.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvOptUnitPwrLvl.setDescription('The optimal power level that is appropriate for this unit. A trap is generated when the power level fluctuates outside the limits set.')
lucentPM4FMEnvUnitPwrRange = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEnvUnitPwrRange.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvUnitPwrRange.setDescription('The power range specified in volts. A trap is generated when the power level fluctuates outside the Opt Pwr +/- Range set.')
lucentPM4FMEnvTrapCtl = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCtl.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCtl.setDescription('The trap control used to turn the environment traps on or off for the specified unit(s).')
lucentPM4PerfMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6))
lucentPM4T1E1PerfMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1))
lucentPM4T1E1PMCur = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1), )
if mibBuilder.loadTexts: lucentPM4T1E1PMCur.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCur.setDescription('Performance management table representing the performance statistics of T1/E1 interfaces in the box. This table represents the current 15-minute statistics. This object is not accessible and present for clarity. This table is part of RFC 1406.')
lucentPM4T1E1PMCurEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMBoardID"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMLineNum"))
if mibBuilder.loadTexts: lucentPM4T1E1PMCurEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurEntry.setDescription('Performance management table entries for all the T1/E1 interfaces in the box. This table represents the current 15-minute statistics. This object is not accessible and present for clarity.')
lucentPM4T1E1PMCurBoard = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurBoard.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurBoard.setDescription('Board number of the interface. The global interface number is computed by using the most significant byte of the ifIndex as the board number; the least significant 2 bytes represent the interface index, and byte 3 represents the unit type, which would be a T1 or E1.')
lucentPM4T1E1PMCurUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurUnitType.setDescription('Unit type indicates the type of interface as T1/E1 or T3/E3 in future. This is part of the interface table ifIndex which is constructed with boardID, unit type and unit index.')
lucentPM4T1E1PMCurLineNum = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLineNum.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLineNum.setDescription('Line number uniquely identifies the T1/E1 interface on a given board.')
lucentPM4T1E1PMCurIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurIfIndex.setDescription('Interface table ifIndex cross reference. The global interface number is computed by using the most significant byte as the board ID; the least significant 2 bytes represent the interface index, and the third byte represents the unit type, which will be a T1 or E1. Thus board 0 interface 3 is represented as 0x00050003. The global interface number corresponds to the ifIndex in MIB II.')
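# --- Illustrative helper (not part of the generated MIB module) ---
# Packing direction of the scheme described above: with board 0, unit type 5
# and interface 3 this yields 0x00050003, matching the example in the
# DESCRIPTION. The helper name is our own and is not defined by the MIB.
def _packCompositeIfIndex(boardId, unitType, interfaceNum):
    return (boardId << 24) | (unitType << 16) | (interfaceNum & 0xFFFF)
# Example: hex(_packCompositeIfIndex(0, 5, 3)) == '0x50003'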
lucentPM4T1E1PMCurESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurESs.setDescription('The number of errored seconds encountered by the line in the current 15-minute interval.')
lucentPM4T1E1PMCurSESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurSESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurSESs.setDescription('The number of Severely Errored Seconds encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurSEFSs = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurSEFSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurSEFSs.setDescription('The number of Severely Errored Framing Seconds encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurUASs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurUASs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurUASs.setDescription('The number of Unavailable Seconds encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurCSSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurCSSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurCSSs.setDescription('The number of Controlled Slip Seconds encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurPCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurPCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurPCVs.setDescription('The number of Path Coding Violations encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurLESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLESs.setDescription('The number of Line Errored Seconds encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurBESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurBESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurBESs.setDescription('The number of Bursty Errored Seconds encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurDMs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurDMs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurDMs.setDescription('The number of Degraded Minutes encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurLCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLCVs.setDescription('The number of Line Code Violations encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMInt = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2), )
if mibBuilder.loadTexts: lucentPM4T1E1PMInt.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMInt.setDescription('Performance management table representing the performance statistics of T1/E1 interfaces in the box. This table represents the 24 hr statistics divided into 96 intervals of 15 minutes each. This object is not accessible and is present for clarity purposes. This table is part of RFC 1406.')
lucentPM4T1E1PMIntEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMIntBoard"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMIntUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMIntLineNum"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMIntInterval"))
if mibBuilder.loadTexts: lucentPM4T1E1PMIntEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntEntry.setDescription('Performance management table entries for all the T1/E1 interfaces in the box. This table represents the 24 hr statistics divided into 96 intervals of 15 minutes each. This object is not accessible and is present for clarity purposes.')
lucentPM4T1E1PMIntBoard = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntBoard.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntBoard.setDescription('Board number of the interface. The global interface number is computed by using the Most Significant nibble of the ifIndex and the Least Significant nibble represents the interface index. Thus board 0 interface 3 is represented as 0x03 or 03 decimal and board 10 interface 3 is represented as 0xa3 or 163 decimal. In an integer, of 4 bytes wide, the 3 MSBytes will all be zeros. The global interface number corresponds to the IfIndex of MIB II.')
lucentPM4T1E1PMIntUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntUnitType.setDescription('Unit type indicates the type of physical or logical device. The unit type for this table is either T1 or E1.')
lucentPM4T1E1PMIntLineNum = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLineNum.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLineNum.setDescription('Line number uniquely identifies the T1/E1 interface for this board.')
lucentPM4T1E1PMIntInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntInterval.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntInterval.setDescription('Interval number for a given board. The 24 hr period is divided into 96 15 min intervals, where 1 is the most recent and 96 is the least recent.')
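# Illustrative helper, not part of the generated MIB module: per the description above,
# interval 1 is the most recent of the 96 fifteen-minute windows and interval 96 the
# oldest. A rough sketch of the window covered by an interval number, expressed as
# (start, end) in minutes before now and assuming interval 1 ends at the present time:
def _pm4_interval_window_minutes(interval):
    """Map an interval number (1..96) to its (start, end) window in minutes before now."""
    if not 1 <= interval <= 96:
        raise ValueError('interval must be between 1 and 96')
    return (interval * 15, (interval - 1) * 15)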
lucentPM4T1E1PMIntIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntIfIndex.setDescription('Interface table ifIndex cross reference. The global interface number is computed by using the Most Significant byte as the board ID and the Least Significant 2 bytes represents the interface index. The third byte represents the unit type which will be a T1 or E1. Thus board 0 interface 3 is represented as 0x00050003. The global interface number corresponds to the IfIndex in MIB II.')
lucentPM4T1E1PMIntESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntESs.setDescription('The number of errored seconds, encountered by the line in the last 24 hrs divided into 96 15 mins intervals.')
lucentPM4T1E1PMIntSESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntSESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntSESs.setDescription('The number of Severely Errored Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntSEFSs = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntSEFSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntSEFSs.setDescription('The number of Severely Errored Framing Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntUASs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntUASs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntUASs.setDescription('The number of Unavailable Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntCSSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntCSSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntCSSs.setDescription('The number of Controlled Slip Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntPCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntPCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntPCVs.setDescription('The number of Path Coding Violations encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntLESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLESs.setDescription('The number of Line Errored Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntBESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntBESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntBESs.setDescription('The number of Bursty Errored Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntDMs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntDMs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntDMs.setDescription('The number of Degraded Minutes encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntLCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLCVs.setDescription('The number of Line Code Violations encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMTotal = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3), )
if mibBuilder.loadTexts: lucentPM4T1E1PMTotal.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotal.setDescription('Performance management table representing the performance statistics of T1/E1 interfaces in the box. This table holds the cumulative totals for the last 24 hour period. This object is not accessible and is present for clarity purposes. This table is part of RFC 1406.')
lucentPM4T1E1PMTotalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMTotalBoard"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMTotalUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMTotalLineNum"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMTotalInterval"))
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalEntry.setDescription('Performance management table entries for all the T1/E1 interfaces in the box. This table holds the cumulative totals for the last 24 hour period. This object is not accessible and is present for clarity purposes.')
lucentPM4T1E1PMTotalBoard = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalBoard.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalBoard.setDescription('Board number of the interface. The global interface number is computed by using the Most Significant nibble of the ifIndex and the Least Significant nibble represents the interface index. Thus board 0 interface 3 is represented as 0x03 or 03 decimal and board 10 interface 3 is represented as 0xa3 or 163 decimal. In an integer, of 4 bytes wide, the 3 MSBytes will all be zeros. The global interface number corresponds to the IfIndex of MIB II. This table stores the cumulative values for the past 24 hr period.')
lucentPM4T1E1PMTotalUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalUnitType.setDescription('Unit type indicates the type of physical or logical device. The unit type for this table is either T1 or E1.')
lucentPM4T1E1PMTotalLineNum = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLineNum.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLineNum.setDescription('Interface number for a given board. The global interface number is computed by using the Most Significant nibble of the ifIndex and the Least Significant nibble represents the interface index. Thus board 0 interface 3 is represented as 0x03 or 03 decimal and board 10 interface 3 is represented as 0xa3 or 163 decimal. In an integer, of 4 bytes wide, the 3 MSBytes will all be zeros. The global interface number corresponds to the IfIndex in MIB II.')
lucentPM4T1E1PMTotalIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalIfIndex.setDescription('IfIndex cross reference value. This value is obtained from the Board/board number and interface number by combining them into the LSByte. The upper nibble represents the board and the lower nibble represents the line number.')
lucentPM4T1E1PMTotalESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalESs.setDescription('The cumulative value of errored seconds, encountered by the line in the last 24 hrs.')
lucentPM4T1E1PMTotalSESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalSESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalSESs.setDescription('The cumulative value Severely Errored Seconds encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalSEFSs = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalSEFSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalSEFSs.setDescription('The cumulative value of Severely Errored Framing Seconds encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalUASs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalUASs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalUASs.setDescription('The cumulative value of Unavailable Seconds encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalCSSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalCSSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalCSSs.setDescription('The cumulative value of Controlled Slip Seconds encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalPCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalPCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalPCVs.setDescription('The cumulative value of Path Coding Violations encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalLESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLESs.setDescription('The cumulative value of Line Errored Seconds encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalBESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalBESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalBESs.setDescription('The cumulative value of Bursty Errored Seconds encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalDMs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalDMs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalDMs.setDescription('The cumulative value of Degraded Minutes encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalLCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLCVs.setDescription('The cumulative value of Line Code Violations encountered by the line for the 24 hr period.')
lucentPM4SecurityMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 7))
lucentPM4AcctMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8))
lucentPM4AcctMgmtComm = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1))
lucentPM4SnmpCommTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1), )
if mibBuilder.loadTexts: lucentPM4SnmpCommTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommTable.setDescription('The SNMP Community Table. This table contains entries to restrict the SNMP get and set operations.')
lucentPM4SnmpCommEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4SnmpCommName"))
if mibBuilder.loadTexts: lucentPM4SnmpCommEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommEntry.setDescription('The entries in the community table.')
lucentPM4SnmpCommIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SnmpCommIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommIndex.setDescription('The index of this entry in the community table. At most 10 network management stations may be specified along with their community names.')
lucentPM4SnmpCommName = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: lucentPM4SnmpCommName.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommName.setDescription('The name of the SNMP Community for SNMP readers and writers. The size of the string is limited to 32 characters. All characters in the string must be printable.')
lucentPM4SnmpCommIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SnmpCommIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommIpAddr.setDescription('The IP Address of the remote community.')
lucentPM4SnmpCommReadAccess = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SnmpCommReadAccess.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommReadAccess.setDescription('Read access enable or disable for this community. When enabled, it allows read-only variable access using this community string by the SNMP client.')
lucentPM4SnmpCommWriteAccess = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SnmpCommWriteAccess.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommWriteAccess.setDescription('Write access enable or disable for this community. When enabled, the agent allows write access to the parameters on the agent by the SNMP clients.')
lucentPM4SnmpCommTraps = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SnmpCommTraps.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommTraps.setDescription('Traps receiving capability enable or disable for this community. When enabled, the SNMP agent forwards the traps generated in the box to this SNMP client.')
lucentPM4SnmpCommStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("delete", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SnmpCommStatus.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommStatus.setDescription('The status of the entry for this community. If the status is set to normal, it allows requests from this SNMP client else it discards the requests from this client.')
lucentPM4SnmpCommLastError = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 511))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SnmpCommLastError.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommLastError.setDescription('If there is an error on a request, this variable may contain a message indicating the error.')
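# Illustrative usage sketch, not part of the generated MIB module: a manager could read a
# single column of this community table with the pysnmp high-level API, indexing the row
# by the community name (lucentPM4SnmpCommName is the table index). The host, community
# string and the assumption that this compiled module is on the pysnmp MIB search path
# are hypothetical.
def _example_read_comm_access(host, community, comm_name):
    """Illustrative only: fetch lucentPM4SnmpCommReadAccess for one community entry."""
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget, ContextData,
                              ObjectType, ObjectIdentity, getCmd)
    iterator = getCmd(SnmpEngine(), CommunityData(community),
                      UdpTransportTarget((host, 161)), ContextData(),
                      ObjectType(ObjectIdentity('LIVINGSTON-PM4-MIB',
                                                'lucentPM4SnmpCommReadAccess', comm_name)))
    error_indication, error_status, error_index, var_binds = next(iterator)
    if error_indication:
        raise RuntimeError(str(error_indication))
    if error_status:
        raise RuntimeError(error_status.prettyPrint())
    return var_binds[0][1]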
lucentPM4AcctMgmtCallEvent = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2))
lucentPM4AMCallEventTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1), )
if mibBuilder.loadTexts: lucentPM4AMCallEventTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCallEventTable.setDescription('Call accounting table containing a list of call events, which may be used for billing purposes.')
lucentPM4AMCallEventEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4AMCEIndex"))
if mibBuilder.loadTexts: lucentPM4AMCallEventEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCallEventEntry.setDescription('The entries in the accounting/billing table.')
lucentPM4AMCEIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEIndex.setDescription('Call event index used as an index into the call event table. The table stores call events which may be used for billing.')
lucentPM4AMCETimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCETimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCETimeStamp.setDescription('Time stamp for this event in seconds since the last reboot.')
lucentPM4AMCEType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("calloriginated", 1), ("callanswered", 2), ("callcleared", 3), ("servicechanged", 4), ("namechanged", 5), ("baudratechanged", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEType.setDescription('Specifies the type of event associated with this entry in the call event table.')
lucentPM4AMCESvcType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16))).clone(namedValues=NamedValues(("none", 1), ("ppp", 2), ("slip", 3), ("mpp", 4), ("x25", 5), ("combinet", 6), ("frameRelay", 7), ("euraw", 8), ("euui", 9), ("telnet", 10), ("telnetBinary", 11), ("rawTcp", 12), ("terminalServer", 13), ("mp", 14), ("virtualConnect", 15), ("x25DChannel", 16)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCESvcType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCESvcType.setDescription('The type of service provided to the user. This field is meaningful if the event type is servicechanged(4), or namechanged(5) events. In all other cases, this object must return none(1).')
lucentPM4AMCEUName = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEUName.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEUName.setDescription('User name of the dialed in user. This object returns the valid user name when the event type is servicechanged(4) or namechanged(5). In all other cases, it returns a NULL.')
lucentPM4AMCEModemBoard = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEModemBoard.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEModemBoard.setDescription('Board ID of the modem which handled this call. This value can be used to diagnose modem related problems (dropping the call, retraining too frequently etc.).')
lucentPM4AMCEModemID = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEModemID.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEModemID.setDescription('Identifies the specific modem on a board which handled this call. Can be used to diagnose modem related problems.')
lucentPM4AMCEModemPort = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEModemPort.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEModemPort.setDescription('A textual string containing the name of the serial interface (ie. S0, S1, etc).')
lucentPM4AMCEDataRate = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEDataRate.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEDataRate.setDescription('Specifies the speed of this connection. Speed is specified as baud rate for modem calls and receive data rate for ISDN calls. This object returns a 0 for call answered and call cleared events.')
lucentPM4AMCECallingPartyID = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCECallingPartyID.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCECallingPartyID.setDescription('Calling party ID. This object is valid only for call answered, call originated, and call cleared events. For all invalid event types, this object is set to NULL.')
lucentPM4AMCECalledPartyID = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCECalledPartyID.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCECalledPartyID.setDescription('Called party ID. This object is valid only for call answered, call originated, and call cleared events. For all invalid event types, this object is set to NULL.')
lucentPM4AMCEInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEInOctets.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEInOctets.setDescription('Total octets received during this call. This object is cleared at the end of each call.')
lucentPM4AMCEOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEOutOctets.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEOutOctets.setDescription('Total octets sent out during this call. This object is cleared at the end of each call.')
lucentPM4AMCECallCharge = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCECallCharge.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCECallCharge.setDescription('Call charge for this call. This object is valid only when the event is call cleared. For all other events this object is set to zero.')
lucentPM4AMCEDisconnReason = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 60, 61, 62, 63, 64, 65, 66, 67, 68, 100, 101, 102, 120, 150, 151, 152, 160, 170, 180, 185, 190, 195, 201, 210))).clone(namedValues=NamedValues(("notApplicable", 1), ("unknown", 2), ("disconnected", 3), ("clidAuthFailed", 4), ("clidAuthServTimeout", 5), ("clidAuthRequestCallback", 6), ("preT310Timeout", 7), ("noModemAvailable", 9), ("noModemNoCarrier", 10), ("noModemLossCarrier", 11), ("noModemResultCodes", 12), ("noModemOpenFailed", 13), ("noModemOpenFailedDiag", 14), ("tsUserExit", 20), ("tsIdleTimeout", 21), ("tsExitTelnet", 22), ("tsNoIPAddr", 23), ("tsExitTcp", 24), ("tsPassWordFail", 25), ("tsRawTCPDisable", 26), ("tsControlC", 27), ("tsDestroyed", 28), ("tsClosedVirtualConnect", 29), ("tsVirtualConnectDestroyed", 30), ("tsExitRlogin", 31), ("tsRloginBadOption", 32), ("tsErrorResource", 33), ("mpNullMessageTimeout", 35), ("pppLcpTimeout", 40), ("pppLcpNegotiateFail", 41), ("pppPAPAuthFail", 42), ("pppCHAPAuthFail", 43), ("pppRemoteAuthFail", 44), ("pppRcvTerminate", 45), ("pppCloseEvent", 46), ("pppCloseNoNcpsOpened", 47), ("pppCloseUnknownMpBundle", 48), ("pppCloseMpAddChanFail", 49), ("tsExitErrTooMany", 50), ("tsExitErrResource", 51), ("tsExitErrInvalidIP", 52), ("tsExitErrHostName", 53), ("tsExitErrBadPort", 54), ("tsExitErrHostReset", 60), ("tsExitErrConnRefused", 61), ("tsExitErrTimedOut", 62), ("tsExitErrClosed", 63), ("tsExitErrNetUnreach", 64), ("tsExitErrHostUnreach", 65), ("tsExitErrNetAdminUnreach", 66), ("tsExitErrHostAdminUnreach", 67), ("tsExitErrPortUnreach", 68), ("sessTimeOut", 100), ("sessFailSecurity", 101), ("sessCallback", 102), ("invalidProtocol", 120), ("requestByRadiusClient", 150), ("localAdmin", 151), ("localSnmp", 152), ("v110Timeout", 160), ("pppAuthTimeout", 170), ("userCallClearRequest", 180), ("remoteEndHungUp", 185), ("resourceQuiesced", 190), ("maxCallDurationReached", 195), ("lowMemory", 201), ("boardDied", 210)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEDisconnReason.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEDisconnReason.setDescription('Reason for the disconnect.')
lucentPM4BoardOfflineTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,1)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardOfflineTrap.setDescription('Board down trap. The variable bindings in the Trap packet provide information about the chassis name, board number and the trap status. This Trap must be cleared manually.')
lucentPM4BoardOnlineTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,2)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardOnlineTrap.setDescription('Board up trap. The variable bindings in the Trap packet provide information about the chassis name, board number and the trap status. This Trap must be cleared manually.')
lucentPM4PwrSupFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,3)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4PwrSupFailTrap.setDescription('Power supply failed trap. The variable bindings in the Trap packet provide information about the chassis name, power supply unit and the trap status. This Trap must be cleared manually.')
lucentPM4PwrSupWarnTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,4)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4PwrSupWarnTrap.setDescription('Power supply warning trap. The variable bindings in the Trap packet provide information about the chassis name, power supply unit and the trap status. This Trap is issued when the power supply fluctuates between a set threshold. This Trap must be cleared manually.')
lucentPM4PwrSupRestoredTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,5)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4PwrSupRestoredTrap.setDescription('Power supply restored trap. The variable bindings in the Trap packet provide information about the chassis name, power supply unit and the trap status. This Trap is issued when a failed power supply is restored. This must be cleared manually.')
lucentPM4FanFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,6)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4FanFailTrap.setDescription('Fan failure trap. The variable bindings in the Trap packet provide information about the chassis name, fan number and the trap status. This Trap must be cleared manually.')
lucentPM4FanRestoredTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,7)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4FanRestoredTrap.setDescription('Fan restored trap. The variable bindings in the Trap packet provide information about the chassis name, fan number and the trap status. This Trap is issued when the failed fan is restored. This trap must be cleared manually.')
lucentPM4BoardTempWarnTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,8)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardTempWarnTrap.setDescription('Board temperature warning trap. The variable bindings in the Trap packet provide information about the chassis name, unit and the trap status. This Trap is issued when the board temperature exceeds a set threshold value. This trap must be cleared manually.')
lucentPM4BoardTempNormalTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,9)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardTempNormalTrap.setDescription('Board temperature normal trap. The variable bindings in the Trap packet provide information about the chassis name, unit and the trap status. This Trap is issued when the board temperature returns to normal. This trap must be cleared manually.')
lucentPM4BoardTooHotTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,10)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardTooHotTrap.setDescription('Board over-temperature trap. The variable bindings in the Trap packet provide information about the chassis name, board number and the trap status. This Trap must be cleared manually.')
lucentPM4ModemFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,11)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4ModemFailTrap.setDescription('Modem failure trap. The variable bindings in the Trap packet provide information about the chassis name, modem number and the trap status. This Trap must be cleared manually.')
lucentPM4T1E1LineDownTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,12)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4T1E1LineDownTrap.setDescription('T1/E1 line down trap. The variable bindings in the Trap packet provide all the information for the clients to display the Board ID, Line ID and the status of the line. This Trap is generated once when the line goes down. It must be cleared manually.')
lucentPM4T1E1LineUpTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,13)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4T1E1LineUpTrap.setDescription('T1/E1 line up trap. The variable bindings in the Trap packet provide all the information for the clients to display the Board ID, Line ID and the status of the line. This Trap is generated once when the line comes up. It must be cleared manually.')
lucentPM4T1E1LineThreshTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,14)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4T1E1LineThreshTrap.setDescription('T1/E1 line threshold trap. The variable bindings in the Trap packet provide all the information for the clients to display the Board ID, Line ID and the trap type. This Trap is generated when the thresholds for the various performance statistics (ES, SES, etc.) are exceeded. It must be cleared manually.')
lucentPM4BoardPwrOffTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,15)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardPwrOffTrap.setDescription('This trap is issued when the power supplied to the board is insufficient. The variable bindings in the Trap packet provide information about the chassis name, board number and the trap status. This Trap must be cleared manually.')
lucentPM4RadiusAuthFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,16)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"))
if mibBuilder.loadTexts: lucentPM4RadiusAuthFailTrap.setDescription('This trap is issued when the Radius authentication fails. This Trap must be cleared manually. The trap provides information about the board and the modem number.')
mibBuilder.exportSymbols("LIVINGSTON-PM4-MIB", lucentPM4FaultMgmt=lucentPM4FaultMgmt, lucentPM4SerialUser=lucentPM4SerialUser, lucentPM4T1E1ChangeTime=lucentPM4T1E1ChangeTime, lucentPM4EtherOptNoClip=lucentPM4EtherOptNoClip, lucentPM4EtherOutFilter=lucentPM4EtherOutFilter, lucentPM4FMEnvOptUnitPwrLvl=lucentPM4FMEnvOptUnitPwrLvl, lucentPM4T1E1PMIntLESs=lucentPM4T1E1PMIntLESs, lucentPM4ModemUnitType=lucentPM4ModemUnitType, PMAlarmType=PMAlarmType, lucentPM4SerialIndex=lucentPM4SerialIndex, lucentPM4SerialQOctets=lucentPM4SerialQOctets, lucentPM4ModemIndex=lucentPM4ModemIndex, lucentPM4SerialPortNumber=lucentPM4SerialPortNumber, lucentPM4SerialTypeDevice=lucentPM4SerialTypeDevice, lucentPM4EtherMacAddress=lucentPM4EtherMacAddress, lucentPM4EtherOptSlip=lucentPM4EtherOptSlip, lucentPM4FMEnvUnitPwrRange=lucentPM4FMEnvUnitPwrRange, lucentPM4T1E1PMCurUASs=lucentPM4T1E1PMCurUASs, lucentPM4CmT1E1=lucentPM4CmT1E1, lucentPM4EtherOptNetBIOS=lucentPM4EtherOptNetBIOS, lucentPM4EtherSubnetMask=lucentPM4EtherSubnetMask, lucentPM4EtherOptAccounting=lucentPM4EtherOptAccounting, lucentPM4SerialInSpeed=lucentPM4SerialInSpeed, lucentPM4T1E1Encoding=lucentPM4T1E1Encoding, lucentPM4Traps=lucentPM4Traps, lucentPM4FMT1E1ThreshTrapAck=lucentPM4FMT1E1ThreshTrapAck, lucentPM4T1E1PMTotal=lucentPM4T1E1PMTotal, lucentPM4Chassis=lucentPM4Chassis, lucentPM4T1E1PMCurDMs=lucentPM4T1E1PMCurDMs, lucentPM4FMChasTrapState=lucentPM4FMChasTrapState, lucentPM4AMCECallCharge=lucentPM4AMCECallCharge, lucentPM4SerialStarted=lucentPM4SerialStarted, PMUnitType=PMUnitType, lucentPM4T1E1StartMode=lucentPM4T1E1StartMode, lucentPM4FMUnitTrapSeverity=lucentPM4FMUnitTrapSeverity, lucentPM4T1E1PMCurCSSs=lucentPM4T1E1PMCurCSSs, lucentPM4T1E1BlueAlarms=lucentPM4T1E1BlueAlarms, lucentPM4ChasCmdId=lucentPM4ChasCmdId, lucentPM4BoardTempWarnTrap=lucentPM4BoardTempWarnTrap, lucentPM4T1E1Status=lucentPM4T1E1Status, lucentPM4FMUnitTrapStatus=lucentPM4FMUnitTrapStatus, lucentPM4FMT1E1ThreshTrapCfg=lucentPM4FMT1E1ThreshTrapCfg, lucentPM4FMT1E1ThreshPCVs=lucentPM4FMT1E1ThreshPCVs, lucentPM4FMEnvBoardID=lucentPM4FMEnvBoardID, lucentPM4FMChasTrapEntry=lucentPM4FMChasTrapEntry, lucentPM4FMT1E1ThreshBESs=lucentPM4FMT1E1ThreshBESs, lucentPM4EtherOptDns=lucentPM4EtherOptDns, lucentPM4FMT1E1ThreshSEFSs=lucentPM4FMT1E1ThreshSEFSs, lucentPM4T1E1PMIntIfIndex=lucentPM4T1E1PMIntIfIndex, lucentPM4FMEnvUnitIndex=lucentPM4FMEnvUnitIndex, lucentPM4ModemCalls=lucentPM4ModemCalls, lucentPM4AMCEIndex=lucentPM4AMCEIndex, lucentPM4SerialTypeLogin=lucentPM4SerialTypeLogin, lucentPM4FMTrapConfig=lucentPM4FMTrapConfig, lucentPM4T1E1LineDownTrap=lucentPM4T1E1LineDownTrap, lucentPM4SerialTypeNwDialIn=lucentPM4SerialTypeNwDialIn, lucentPM4FMT1E1ThreshDMs=lucentPM4FMT1E1ThreshDMs, lucentPM4T1E1PMTotalCSSs=lucentPM4T1E1PMTotalCSSs, lucentPM4FMT1E1ThreshCSSs=lucentPM4FMT1E1ThreshCSSs, lucentPM4MibRev=lucentPM4MibRev, lucentPM4T1E1PMCurSEFSs=lucentPM4T1E1PMCurSEFSs, lucentPM4SnmpCommTraps=lucentPM4SnmpCommTraps, lucentPM4AMCEModemBoard=lucentPM4AMCEModemBoard, lucentPM4T1E1PMCurESs=lucentPM4T1E1PMCurESs, lucentPM4FMT1E1ThreshESs=lucentPM4FMT1E1ThreshESs, lucentPM4ModemRetrains=lucentPM4ModemRetrains, lucentPM4SerialSessionId=lucentPM4SerialSessionId, lucentPM4SerialEntry=lucentPM4SerialEntry, lucentPM4ChasCmdResult=lucentPM4ChasCmdResult, lucentPM4EtherOptBcastHigh=lucentPM4EtherOptBcastHigh, lucentPM4ChasCmdDevId=lucentPM4ChasCmdDevId, lucentPM4T1E1SuperSignal=lucentPM4T1E1SuperSignal, PMDiagCmdStatus=PMDiagCmdStatus, lucentPM4CmInterfaces=lucentPM4CmInterfaces, 
lucentPM4T1E1PMCurBESs=lucentPM4T1E1PMCurBESs, lucentPM4SnmpCommIpAddr=lucentPM4SnmpCommIpAddr, lucentPM4AMCETimeStamp=lucentPM4AMCETimeStamp, lucentPM4PerfMgmt=lucentPM4PerfMgmt, PMDiagTestCntrl=PMDiagTestCntrl, lucentPM4T1E1PMTotalBoard=lucentPM4T1E1PMTotalBoard, lucentPM4SnmpCommReadAccess=lucentPM4SnmpCommReadAccess, lucentPM4ModemPortName=lucentPM4ModemPortName, lucentPM4AcctMgmt=lucentPM4AcctMgmt, lucentPM4T1E1PMTotalEntry=lucentPM4T1E1PMTotalEntry, lucentPM4SerialUnitType=lucentPM4SerialUnitType, lucentPM4ChasCmdTable=lucentPM4ChasCmdTable, lucentPM4ModemOutSpeed=lucentPM4ModemOutSpeed, lucentPM4FMEnvTrapCfg=lucentPM4FMEnvTrapCfg, lucentPM4SerialTypeHardwired=lucentPM4SerialTypeHardwired, lucentPM4FMT1E1ThreshSESs=lucentPM4FMT1E1ThreshSESs, lucentPM4BoardOfflineTrap=lucentPM4BoardOfflineTrap, lucentPM4T1E1PMIntPCVs=lucentPM4T1E1PMIntPCVs, lucentPM4FMEqpTrapCfg=lucentPM4FMEqpTrapCfg, lucentPM4T1E1UnitType=lucentPM4T1E1UnitType, lucentPM4SnmpCommStatus=lucentPM4SnmpCommStatus, lucentPM4CmSerial=lucentPM4CmSerial, lucentPM4T1E1BipolarErrors=lucentPM4T1E1BipolarErrors, lucentPM4ChasCmdUnitType=lucentPM4ChasCmdUnitType, lucentPM4T1E1PMInt=lucentPM4T1E1PMInt, lucentPM4ModemStatus=lucentPM4ModemStatus, lucentPM4ChasCmdParams=lucentPM4ChasCmdParams, lucentPM4AMCallEventEntry=lucentPM4AMCallEventEntry, lucentPM4ChasCmdIndex=lucentPM4ChasCmdIndex, lucentPM4EtherOptNoPAP=lucentPM4EtherOptNoPAP, lucentPM4FMEqpTrapCfgEntry=lucentPM4FMEqpTrapCfgEntry, lucentPM4T1E1PMIntCSSs=lucentPM4T1E1PMIntCSSs, lucentPM4T1E1PMTotalUASs=lucentPM4T1E1PMTotalUASs, lucentPM4EtherBoardIndex=lucentPM4EtherBoardIndex, lucentPM4T1E1PMIntESs=lucentPM4T1E1PMIntESs, lucentPM4EtherOptNoListen=lucentPM4EtherOptNoListen, lucentPM4ModemFailTrap=lucentPM4ModemFailTrap, lucentPM4FMChasTrapSeverity=lucentPM4FMChasTrapSeverity, lucentPM4FMEqpUnitIndex=lucentPM4FMEqpUnitIndex, lucentPM4EtherPriNameServer=lucentPM4EtherPriNameServer, lucentPM4SerialIpAddress=lucentPM4SerialIpAddress, lucentPM4SerialBoardIndex=lucentPM4SerialBoardIndex, lucentPM4FMEnvUnitType=lucentPM4FMEnvUnitType, lucentPM4AMCEModemID=lucentPM4AMCEModemID, lucentPM4T1E1PMIntLCVs=lucentPM4T1E1PMIntLCVs, lucentPM4SecurityMgmt=lucentPM4SecurityMgmt, lucentPM4T1E1PMIntUASs=lucentPM4T1E1PMIntUASs, lucentPM4T1E1PMIntBESs=lucentPM4T1E1PMIntBESs, lucentPM4T1E1PMTotalSESs=lucentPM4T1E1PMTotalSESs, lucentPM4T1E1PMCurPCVs=lucentPM4T1E1PMCurPCVs, lucentPM4T1E1PMTotalLineNum=lucentPM4T1E1PMTotalLineNum, lucentPM4AMCEDisconnReason=lucentPM4AMCEDisconnReason, lucentPM4T1E1PMCurIfIndex=lucentPM4T1E1PMCurIfIndex, lucentPM4SnmpCommWriteAccess=lucentPM4SnmpCommWriteAccess, lucentPM4FMEnvUnitTempRange=lucentPM4FMEnvUnitTempRange, lucentPM4FMT1E1ThreshUnitType=lucentPM4FMT1E1ThreshUnitType, lucentPM4EtherOptDefaultRip=lucentPM4EtherOptDefaultRip, lucentPM4AMCEUName=lucentPM4AMCEUName, lucentPM4FMBoardIndex=lucentPM4FMBoardIndex, lucentPM4BoardOnlineTrap=lucentPM4BoardOnlineTrap, lucentPM4T1E1SerialCount=lucentPM4T1E1SerialCount, lucentPM4AMCECallingPartyID=lucentPM4AMCECallingPartyID, lucentPM4SerialOutSpeed=lucentPM4SerialOutSpeed, lucentPM4AMCEOutOctets=lucentPM4AMCEOutOctets, lucentPM4T1E1Table=lucentPM4T1E1Table, lucentPM4ModemTable=lucentPM4ModemTable, lucentPM4EtherTable=lucentPM4EtherTable, lucentPM4T1E1LineUpTrap=lucentPM4T1E1LineUpTrap, lucentPM4EtherOptIPFilter=lucentPM4EtherOptIPFilter, lucentPM4EtherOptEtherIpx=lucentPM4EtherOptEtherIpx, lucentPM4AMCESvcType=lucentPM4AMCESvcType, lucentPM4T1E1PMIntSESs=lucentPM4T1E1PMIntSESs, lucentPM4T1E1PMIntDMs=lucentPM4T1E1PMIntDMs, 
lucentPM4T1E1SyncErrors=lucentPM4T1E1SyncErrors, lucentPM4T1E1PMTotalUnitType=lucentPM4T1E1PMTotalUnitType, lucentPM4T1E1PMTotalBESs=lucentPM4T1E1PMTotalBESs, lucentPM4T1E1PMCurSESs=lucentPM4T1E1PMCurSESs, lucentPM4T1E1PMTotalPCVs=lucentPM4T1E1PMTotalPCVs, lucentPM4T1E1PMIntUnitType=lucentPM4T1E1PMIntUnitType, lucentPM4EtherIfType=lucentPM4EtherIfType, lucentPM4AMCEModemPort=lucentPM4AMCEModemPort, PMEquipPRIStatus=PMEquipPRIStatus, lucentPM4CmModem=lucentPM4CmModem, lucentPM4ModemConnects=lucentPM4ModemConnects, lucentPM4SerialIdle=lucentPM4SerialIdle, lucentPM4PwrSupRestoredTrap=lucentPM4PwrSupRestoredTrap, lucentPM4AMCEType=lucentPM4AMCEType, lucentPM4AcctMgmtComm=lucentPM4AcctMgmtComm, lucentPM4EtherOptDefaultListen=lucentPM4EtherOptDefaultListen, lucentPM4EtherOptPmeMsg=lucentPM4EtherOptPmeMsg, lucentPM4T1E1PMCurEntry=lucentPM4T1E1PMCurEntry, lucentPM4EtherIndex=lucentPM4EtherIndex, lucentPM4ModemBoardIndex=lucentPM4ModemBoardIndex, lucentPM4FMChasTrapIndex=lucentPM4FMChasTrapIndex, lucentPM4T1E1PMTotalSEFSs=lucentPM4T1E1PMTotalSEFSs, lucentPM4FMChasTrapStatus=lucentPM4FMChasTrapStatus, lucentPM4FMEqpRepTimer=lucentPM4FMEqpRepTimer, lucentPM4FMT1E1ThreshLESs=lucentPM4FMT1E1ThreshLESs, lucentPM4T1E1SyncLoss=lucentPM4T1E1SyncLoss, lucentPM4T1E1YellowAlarms=lucentPM4T1E1YellowAlarms, lucentPM4FanFailTrap=lucentPM4FanFailTrap, lucentPM4FMT1E1ThreshTrapCfgEntry=lucentPM4FMT1E1ThreshTrapCfgEntry, lucentPM4SnmpCommName=lucentPM4SnmpCommName, lucentPM4T1E1PMIntSEFSs=lucentPM4T1E1PMIntSEFSs, lucentPM4ChasSummary=lucentPM4ChasSummary, lucentPM4T1E1PhysType=lucentPM4T1E1PhysType, lucentPM4EtherPortName=lucentPM4EtherPortName, lucentPM4T1E1PMCurLESs=lucentPM4T1E1PMCurLESs, lucentPM4ChasCmdEntry=lucentPM4ChasCmdEntry, lucentPM4FMEqpBoardIndex=lucentPM4FMEqpBoardIndex, lucentPM4T1E1RecvLevel=lucentPM4T1E1RecvLevel, lucentPM4SnmpCommIndex=lucentPM4SnmpCommIndex, lucentPM4FMChasTrapTimeStamp=lucentPM4FMChasTrapTimeStamp, lucentPM4AMCEDataRate=lucentPM4AMCEDataRate, lucentPM4T1E1PCM=lucentPM4T1E1PCM, lucentPM4EtherEntry=lucentPM4EtherEntry, lucentPM4ConfigMgmt=lucentPM4ConfigMgmt, lucentPM4FMEnvTrapCfgEntry=lucentPM4FMEnvTrapCfgEntry, lucentPM4FMChasTrapUnitIndex=lucentPM4FMChasTrapUnitIndex, lucentPM4FMEqpUnitType=lucentPM4FMEqpUnitType, lucentPM4SerialPhysType=lucentPM4SerialPhysType, lucentPM4FMChasTrapUnitType=lucentPM4FMChasTrapUnitType, lucentPM4FMEnvOptUnitTemp=lucentPM4FMEnvOptUnitTemp, lucentPM4T1E1LineThreshTrap=lucentPM4T1E1LineThreshTrap, lucentPM4FaultMgmtChasTrap=lucentPM4FaultMgmtChasTrap, lucentPM4ModemId=lucentPM4ModemId, lucentPM4CmEther=lucentPM4CmEther, lucentPM4T1E1CarrierLoss=lucentPM4T1E1CarrierLoss, lucentPM4T1E1Framing=lucentPM4T1E1Framing, lucentPM4T1E1PMIntEntry=lucentPM4T1E1PMIntEntry, lucentPM4T1E1Index=lucentPM4T1E1Index, lucentPM4T1E1PMCur=lucentPM4T1E1PMCur, lucentPM4ModemInByteCount=lucentPM4ModemInByteCount, lucentPM4T1E1Entry=lucentPM4T1E1Entry, lucentPM4T1E1BoardIndex=lucentPM4T1E1BoardIndex, lucentPM4EtherOptSnmp=lucentPM4EtherOptSnmp, lucentPM4FMUnitType=lucentPM4FMUnitType, lucentPM4ModemRenegotiates=lucentPM4ModemRenegotiates, lucentPM4FMEnvTrapCtl=lucentPM4FMEnvTrapCtl, lucentPM4T1E1PMCurBoard=lucentPM4T1E1PMCurBoard, lucentPM4EtherAltNameServer=lucentPM4EtherAltNameServer, lucentPM4T1E1PMCurLineNum=lucentPM4T1E1PMCurLineNum, lucentPM4BoardTempNormalTrap=lucentPM4BoardTempNormalTrap, lucentPM4ModemCompression=lucentPM4ModemCompression, lucentPM4EtherInFilter=lucentPM4EtherInFilter, lucentPM4SerialOutOctets=lucentPM4SerialOutOctets, 
lucentPM4T1E1PMTotalIfIndex=lucentPM4T1E1PMTotalIfIndex, lucentPM4T1E1PMTotalLCVs=lucentPM4T1E1PMTotalLCVs, lucentPM4SerialDirection=lucentPM4SerialDirection, lucentPM4T1E1PMTotalLESs=lucentPM4T1E1PMTotalLESs, lucentPM4FanRestoredTrap=lucentPM4FanRestoredTrap, lucentPM4SWRev=lucentPM4SWRev, lucentPM4PwrSupFailTrap=lucentPM4PwrSupFailTrap, lucentPM4FMT1E1ThreshBoardIndex=lucentPM4FMT1E1ThreshBoardIndex, lucentPM4ModemOutByteCount=lucentPM4ModemOutByteCount, lucentPM4SerialifDescr=lucentPM4SerialifDescr, lucentPM4T1E1SerialIndex=lucentPM4T1E1SerialIndex, lucentPM4ChasCmdBoardId=lucentPM4ChasCmdBoardId, lucentPM4ModemEntry=lucentPM4ModemEntry, lucentPM4FMT1E1ThreshRepTimer=lucentPM4FMT1E1ThreshRepTimer, lucentPM4T1E1PMCurLCVs=lucentPM4T1E1PMCurLCVs, lucentPM4EtherOptRip=lucentPM4EtherOptRip, lucentPM4T1E1PMTotalESs=lucentPM4T1E1PMTotalESs, lucentPM4FMEqpTrapCtl=lucentPM4FMEqpTrapCtl, lucentPM4BoardPwrOffTrap=lucentPM4BoardPwrOffTrap, lucentPM4RadiusAuthFailTrap=lucentPM4RadiusAuthFailTrap, lucentPM4AMCallEventTable=lucentPM4AMCallEventTable, PMEquipStatus=PMEquipStatus, lucentPM4T1E1PMIntLineNum=lucentPM4T1E1PMIntLineNum, lucentPM4PwrSupWarnTrap=lucentPM4PwrSupWarnTrap, lucentPM4T1E1CRCErrors=lucentPM4T1E1CRCErrors, lucentPM4SerialInOctets=lucentPM4SerialInOctets, lucentPM4ModemDetects=lucentPM4ModemDetects, lucentPM4FaultMgmtIsolation=lucentPM4FaultMgmtIsolation, lucentPM4T1E1PMTotalDMs=lucentPM4T1E1PMTotalDMs, lucentPM4T1E1PMCurUnitType=lucentPM4T1E1PMCurUnitType, lucentPM4SnmpCommLastError=lucentPM4SnmpCommLastError, lucentPM4SerialTable=lucentPM4SerialTable, lucentPM4AMCEInOctets=lucentPM4AMCEInOctets, lucentPM4FMUnitIndex=lucentPM4FMUnitIndex)
mibBuilder.exportSymbols("LIVINGSTON-PM4-MIB", lucentPM4AcctMgmtCallEvent=lucentPM4AcctMgmtCallEvent, lucentPM4ModemInSpeed=lucentPM4ModemInSpeed, lucentPM4EtherIpAddress=lucentPM4EtherIpAddress, lucentPM4T1E1PerfMgmt=lucentPM4T1E1PerfMgmt, lucentPM4AMCECalledPartyID=lucentPM4AMCECalledPartyID, lucentPM4BoardTooHotTrap=lucentPM4BoardTooHotTrap, lucentPM4SerialTypeNwDialout=lucentPM4SerialTypeNwDialout, lucentPM4SnmpCommTable=lucentPM4SnmpCommTable, lucentPM4SerialDS0State=lucentPM4SerialDS0State, lucentPM4SerialPortStatus=lucentPM4SerialPortStatus, lucentPM4EtherIpGateway=lucentPM4EtherIpGateway, lucentPM4ChasCmdUnitIndex=lucentPM4ChasCmdUnitIndex, lucentPM4ModemProtocol=lucentPM4ModemProtocol, lucentPM4T1E1Function=lucentPM4T1E1Function, lucentPM4FMT1E1ThreshUASs=lucentPM4FMT1E1ThreshUASs, lucentPM4T1E1PMIntBoard=lucentPM4T1E1PMIntBoard, lucentPM4Mib=lucentPM4Mib, lucentPM4SerialTypeDeviceName=lucentPM4SerialTypeDeviceName, lucentPM4T1E1PMIntInterval=lucentPM4T1E1PMIntInterval, lucentPM4EtherIfIndex=lucentPM4EtherIfIndex, lucentPM4SnmpCommEntry=lucentPM4SnmpCommEntry, lucentPM4FMChasTrapBoardID=lucentPM4FMChasTrapBoardID, lucentPM4EtherOptEtherDown=lucentPM4EtherOptEtherDown, lucentPM4FMEqpTrapId=lucentPM4FMEqpTrapId)
| [
"[email protected]"
]
| |
49307e4030a27ff3a99f09bee2dfa9b7677a0bfa | 6109a95a284891792c35d0d19906ab8d1697f9c7 | /src/datamigration/azext_datamigration/vendored_sdks/datamigration/operations/_database_migrations_sql_mi_operations.py | 442a15827c7342be590b73150c6bde88654f882a | [
"MIT",
"LicenseRef-scancode-generic-cla"
]
| permissive | Tatsinnit/azure-cli-extensions | 3e5a1752edced00d7c33660027d2c17fae074569 | a1959b123d4c11149adae2728ab5791949889d54 | refs/heads/master | 2022-10-05T17:40:10.825889 | 2022-03-16T10:33:56 | 2022-03-16T10:33:56 | 250,102,909 | 0 | 0 | MIT | 2020-03-25T22:12:01 | 2020-03-25T22:12:01 | null | UTF-8 | Python | false | false | 26,682 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DatabaseMigrationsSqlMiOperations(object):
"""DatabaseMigrationsSqlMiOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datamigration.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
managed_instance_name, # type: str
target_db_name, # type: str
migration_operation_id=None, # type: Optional[str]
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.DatabaseMigrationSqlMi"
"""Retrieve the Database Migration resource.
:param resource_group_name: Name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name:
:type managed_instance_name: str
:param target_db_name: The name of the target database.
:type target_db_name: str
:param migration_operation_id: Optional migration operation ID. If this is provided, then
details of migration operation for that ID are retrieved. If not provided (default), then
details related to most recent or current operation are retrieved.
:type migration_operation_id: str
:param expand: The child resources to include in the response.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatabaseMigrationSqlMi, or the result of cls(response)
:rtype: ~azure.mgmt.datamigration.models.DatabaseMigrationSqlMi
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DatabaseMigrationSqlMi"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if migration_operation_id is not None:
query_parameters['migrationOperationId'] = self._serialize.query("migration_operation_id", migration_operation_id, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DatabaseMigrationSqlMi', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}'} # type: ignore
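    # Hedged usage sketch (not part of the generated code): assuming the service
    # client exposes this operation group as `database_migrations_sql_mi`, and
    # using placeholder resource names, a lookup might look like the following.
    #
    #   migration = client.database_migrations_sql_mi.get(
    #       resource_group_name='my-rg',
    #       managed_instance_name='my-sqlmi',
    #       target_db_name='mydb',
    #       expand='MigrationStatusDetails')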
def _create_or_update_initial(
self,
resource_group_name, # type: str
managed_instance_name, # type: str
target_db_name, # type: str
parameters, # type: "models.DatabaseMigrationSqlMi"
**kwargs # type: Any
):
# type: (...) -> "models.DatabaseMigrationSqlMi"
cls = kwargs.pop('cls', None) # type: ClsType["models.DatabaseMigrationSqlMi"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DatabaseMigrationSqlMi')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DatabaseMigrationSqlMi', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DatabaseMigrationSqlMi', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
managed_instance_name, # type: str
target_db_name, # type: str
parameters, # type: "models.DatabaseMigrationSqlMi"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.DatabaseMigrationSqlMi"]
"""Create a new database migration to a given SQL Managed Instance.
:param resource_group_name: Name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name:
:type managed_instance_name: str
:param target_db_name: The name of the target database.
:type target_db_name: str
:param parameters: Details of SqlMigrationService resource.
:type parameters: ~azure.mgmt.datamigration.models.DatabaseMigrationSqlMi
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DatabaseMigrationSqlMi or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.datamigration.models.DatabaseMigrationSqlMi]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DatabaseMigrationSqlMi"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
target_db_name=target_db_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DatabaseMigrationSqlMi', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}'} # type: ignore
def _cancel_initial(
self,
resource_group_name, # type: str
managed_instance_name, # type: str
target_db_name, # type: str
parameters, # type: "models.MigrationOperationInput"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._cancel_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'MigrationOperationInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}/cancel'} # type: ignore
def begin_cancel(
self,
resource_group_name, # type: str
managed_instance_name, # type: str
target_db_name, # type: str
parameters, # type: "models.MigrationOperationInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stop migrations in progress for the database.
:param resource_group_name: Name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name:
:type managed_instance_name: str
:param target_db_name: The name of the target database.
:type target_db_name: str
:param parameters: Required migration operation ID for which cancel will be initiated.
:type parameters: ~azure.mgmt.datamigration.models.MigrationOperationInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._cancel_initial(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
target_db_name=target_db_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}/cancel'} # type: ignore
def _cutover_initial(
self,
resource_group_name, # type: str
managed_instance_name, # type: str
target_db_name, # type: str
parameters, # type: "models.MigrationOperationInput"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._cutover_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'MigrationOperationInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_cutover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}/cutover'} # type: ignore
def begin_cutover(
self,
resource_group_name, # type: str
managed_instance_name, # type: str
target_db_name, # type: str
parameters, # type: "models.MigrationOperationInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Initiate cutover for online migration in progress for the database.
:param resource_group_name: Name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name:
:type managed_instance_name: str
:param target_db_name: The name of the target database.
:type target_db_name: str
:param parameters: Required migration operation ID for which cutover will be initiated.
:type parameters: ~azure.mgmt.datamigration.models.MigrationOperationInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._cutover_initial(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
target_db_name=target_db_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cutover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}/cutover'} # type: ignore
| [
"[email protected]"
]
| |
931dd466a1df578c537a95a690a9c0529161c7ba | 6553f98336fa296ca4faa6e3e247c0a343d883f3 | /yolo/utils.py | c639349f2c4306f0ce792157149cee22126cc8ce | []
| no_license | DableUTeeF/algea | 5341a529534e26f1d7ae4ad71d064f32f8f0aba5 | 3a34c796bdddd07f2ab17811fe472cdce6d9207a | refs/heads/master | 2020-08-29T12:27:39.566480 | 2019-11-08T02:41:03 | 2019-11-08T02:41:03 | 218,030,814 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,045 | py | import numpy as np
import os
import xml.etree.ElementTree as ET
import csv
import cv2
# from keras.optimizers import Optimizer
# from keras import backend as K
import copy
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from six import raise_from
import csv
import sys
import os.path
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
    Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _read_classes(csv_reader):
""" Parse the classes file given by csv_reader.
"""
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def _read_annotations(csv_reader, classes):
""" Read annotations from the csv_reader.
"""
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
img_file, x1, y1, x2, y2, class_name = row[:6]
except ValueError:
raise_from(ValueError(
'line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line)),
None)
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
# Check that the bounding box is valid.
if x2 <= x1:
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if y2 <= y1:
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
# check if the current class name is correctly present
if class_name not in classes:
raise ValueError('line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
return result
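# Illustrative sketch (not part of the original module): a hypothetical CSV row
# and the dict produced for it by _read_annotations. The path and class name
# below are made up for the example.
#
#   import csv, io
#   reader = csv.reader(io.StringIO('images/001.jpg,10,20,110,220,algae\n'))
#   _read_annotations(reader, {'algae': 0})
#   # -> {'images/001.jpg': [{'x1': 10, 'x2': 110, 'y1': 20, 'y2': 220, 'class': 'algae'}]}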
def _open_for_csv(path):
""" Open a file with flags suitable for csv.reader.
This is different for python2 it means with mode 'rb',
for python3 this means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
class CocoGenerator:
""" Generate data from the COCO dataset.
See https://github.com/cocodataset/cocoapi/tree/master/PythonAPI for more information.
"""
def __init__(self, json_path, image_dir):
""" Initialize a COCO data generator.
Args
data_dir: Path to where the COCO dataset is stored.
set_name: Name of the set to parse.
"""
self.image_dir = image_dir
self.coco = COCO(json_path)
self.image_ids = self.coco.getImgIds()
self.load_classes()
def load_classes(self):
""" Loads the class to label mapping (and inverse) for COCO.
"""
# load class names (name -> label)
categories = self.coco.loadCats(self.coco.getCatIds())
categories.sort(key=lambda x: x['id'])
self.classes = {}
self.coco_labels = {}
self.coco_labels_inverse = {}
for c in categories:
self.coco_labels[len(self.classes)] = c['id']
self.coco_labels_inverse[c['id']] = len(self.classes)
self.classes[c['name']] = len(self.classes)
# also load the reverse (label -> name)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
def size(self):
""" Size of the COCO dataset.
"""
return len(self.image_ids)
def num_classes(self):
""" Number of classes in the dataset. For COCO this is 80.
"""
return len(self.classes)
def has_label(self, label):
""" Return True if label is a known label.
"""
return label in self.labels
def has_name(self, name):
""" Returns True if name is a known class.
"""
return name in self.classes
def name_to_label(self, name):
""" Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
""" Map label to name.
"""
return self.labels[label]
def coco_label_to_label(self, coco_label):
""" Map COCO label to the label as used in the network.
COCO has some gaps in the order of labels. The highest label is 90, but there are 80 classes.
"""
return self.coco_labels_inverse[coco_label]
def coco_label_to_name(self, coco_label):
""" Map COCO label to name.
"""
return self.label_to_name(self.coco_label_to_label(coco_label))
def label_to_coco_label(self, label):
""" Map label as used by the network to labels as used by COCO.
"""
return self.coco_labels[label]
def image_aspect_ratio(self, image_index):
""" Compute the aspect ratio for an image with image_index.
"""
image = self.coco.loadImgs(self.image_ids[image_index])[0]
return float(image['width']) / float(image['height'])
def load_annotations(self, image_index):
""" Load annotations for an image_index.
"""
# get ground truth annotations
annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
annotations = {'labels': np.empty((0,)), 'bboxes': np.empty((0, 4))}
# some images appear to miss annotations (like image with id 257034)
if len(annotations_ids) == 0:
return annotations
# parse annotations
coco_annotations = self.coco.loadAnns(annotations_ids)
for idx, a in enumerate(coco_annotations):
# some annotations have basically no width / height, skip them
if a['bbox'][2] < 1 or a['bbox'][3] < 1:
continue
annotations['labels'] = np.concatenate(
[annotations['labels'], [self.coco_label_to_label(a['category_id'])]], axis=0)
annotations['bboxes'] = np.concatenate([annotations['bboxes'], [[
a['bbox'][0],
a['bbox'][1],
a['bbox'][0] + a['bbox'][2],
a['bbox'][1] + a['bbox'][3],
]]], axis=0)
return annotations
def parse_annotation(ann_dir, img_dir, labels=()):
all_imgs = []
seen_labels = {}
for ann in sorted(os.listdir(ann_dir)):
img = {'object': []}
tree = ET.parse(os.path.join(ann_dir, ann))
for elem in tree.iter():
if 'filename' in elem.tag:
img['filename'] = os.path.join(img_dir, elem.text)
if 'width' in elem.tag:
img['width'] = int(elem.text)
if 'height' in elem.tag:
img['height'] = int(elem.text)
if 'object' in elem.tag or 'part' in elem.tag:
obj = {}
for attr in list(elem):
if 'name' in attr.tag:
obj['name'] = attr.text
if obj['name'] in seen_labels:
seen_labels[obj['name']] += 1
else:
seen_labels[obj['name']] = 1
if len(labels) > 0 and obj['name'] not in labels:
break
else:
img['object'] += [obj]
if 'bndbox' in attr.tag:
for dim in list(attr):
if 'xmin' in dim.tag:
obj['xmin'] = int(round(float(dim.text)))
if 'ymin' in dim.tag:
obj['ymin'] = int(round(float(dim.text)))
if 'xmax' in dim.tag:
obj['xmax'] = int(round(float(dim.text)))
if 'ymax' in dim.tag:
obj['ymax'] = int(round(float(dim.text)))
if len(img['object']) > 0:
all_imgs += [img]
return all_imgs, seen_labels
def parse_voc_annotation(ann_dir, img_dir, labels=()):
all_imgs = {}
seen_labels = {}
max_box_per_image = 0
for ann in sorted(os.listdir(ann_dir)):
img = {'object': []}
tree = ET.parse(os.path.join(ann_dir, ann))
for elem in tree.iter():
if 'filename' in elem.tag:
filename = elem.text[:-4]
img['filename'] = os.path.join(img_dir, elem.text)
if 'width' in elem.tag:
img['width'] = int(elem.text)
if 'height' in elem.tag:
img['height'] = int(elem.text)
if 'object' in elem.tag or 'part' in elem.tag:
obj = {}
for attr in list(elem):
if 'name' in attr.tag:
obj['name'] = attr.text
if obj['name'] in seen_labels:
seen_labels[obj['name']] += 1
else:
seen_labels[obj['name']] = 1
if len(labels) > 0 and obj['name'] not in labels:
break
else:
img['object'] += [obj]
if 'bndbox' in attr.tag:
for dim in list(attr):
if 'xmin' in dim.tag:
obj['xmin'] = int(round(float(dim.text)))
if 'ymin' in dim.tag:
obj['ymin'] = int(round(float(dim.text)))
if 'xmax' in dim.tag:
obj['xmax'] = int(round(float(dim.text)))
if 'ymax' in dim.tag:
obj['ymax'] = int(round(float(dim.text)))
if len(img['object']) > 0:
all_imgs[filename] = img
if len(img['object']) > max_box_per_image:
max_box_per_image = len(img['object'])
return all_imgs, seen_labels, max_box_per_image
def create_voc_training_instances(voc_folder):
# parse annotations of the training set
ints, labels, max_box_per_image = parse_voc_annotation(os.path.join(voc_folder, 'Annotations'),
os.path.join(voc_folder, 'JPEGImages'))
train_txt = open(os.path.join(voc_folder, 'ImageSets/Main/train.txt')).read().split('\n')[:-1]
val_txt = open(os.path.join(voc_folder, 'ImageSets/Main/val.txt')).read().split('\n')[:-1]
train_ints = [ints[train] for train in train_txt]
valid_ints = [ints[val] for val in val_txt]
# for instance in ints:
# filename = os.path.split(instance['filename'])[-1][:-4]
# if filename in train_txt:
# train_ints.append(instance)
# else:
# valid_ints.append(instance)
return train_ints, valid_ints, sorted(labels), max_box_per_image
def create_csv_training_instances(train_csv, test_csv, class_csv, with_wh=False):
with _open_for_csv(class_csv) as file:
classes = _read_classes(csv.reader(file, delimiter=','))
with _open_for_csv(train_csv) as file:
train_image_data = _read_annotations(csv.reader(file, delimiter=','), classes)
with _open_for_csv(test_csv) as file:
test_image_data = _read_annotations(csv.reader(file, delimiter=','), classes)
train_ints = []
valid_ints = []
labels = list(classes)
max_box_per_image = 0
for k in train_image_data:
image_data = train_image_data[k]
ints = {'filename': k, 'object': []}
for i, obj in enumerate(image_data):
o = {'xmin': obj['x1'], 'xmax': obj['x2'], 'ymin': obj['y1'], 'ymax': obj['y2'], 'name': obj['class']}
if with_wh:
x = cv2.imread(k)
height, width, _ = x.shape
o['width'] = width
o['height'] = height
ints['object'].append(o)
if i + 1 > max_box_per_image:
max_box_per_image = i + 1
train_ints.append(ints)
for k in test_image_data:
image_data = test_image_data[k]
ints = {'filename': k, 'object': []}
for i, obj in enumerate(image_data):
o = {'xmin': obj['x1'], 'xmax': obj['x2'], 'ymin': obj['y1'], 'ymax': obj['y2'], 'name': obj['class']}
if with_wh:
x = cv2.imread(k)
height, width, _ = x.shape
o['width'] = width
o['height'] = height
ints['object'].append(o)
if i + 1 > max_box_per_image:
max_box_per_image = i + 1
valid_ints.append(ints)
return train_ints, valid_ints, sorted(labels), max_box_per_image
def create_coco_training_instances(train_json,
val_json,
train_image_dir,
val_image_dir,
with_empty=True
):
train_coco = CocoGenerator(train_json, train_image_dir)
val_coco = CocoGenerator(val_json, val_image_dir)
assert sorted(val_coco.labels) == sorted(
        train_coco.labels), r"Something's wrong, the labels in val and train do not seem to be the same"
labels = {}
for label in val_coco.labels:
labels[val_coco.labels[label]] = 0
max_box_per_image = 0
train_ints = []
valid_ints = []
for image_index in range(len(train_coco.image_ids)):
ann = train_coco.load_annotations(image_index)
image_info = train_coco.coco.loadImgs(train_coco.image_ids[image_index])[0]
impath = os.path.join(train_coco.image_dir, image_info['file_name'])
instance = {'filename': impath,
'object': [],
'width': image_info['width'],
'height': image_info['height']}
for j in range(len(ann['labels'])):
x1 = int(ann['bboxes'][j][0])
y1 = int(ann['bboxes'][j][1])
x2 = int(ann['bboxes'][j][2])
y2 = int(ann['bboxes'][j][3])
cls = train_coco.labels[ann['labels'][j]]
obj = {'xmin': x1, 'xmax': x2, 'ymin': y1, 'ymax': y2, 'name': cls}
instance['object'].append(obj)
if with_empty or len(instance['object']) > 0:
train_ints.append(instance)
if len(instance['object']) > max_box_per_image:
max_box_per_image = len(instance['object'])
for image_index in range(len(val_coco.image_ids)):
ann = val_coco.load_annotations(image_index)
image_info = val_coco.coco.loadImgs(val_coco.image_ids[image_index])[0]
impath = os.path.join(val_coco.image_dir, image_info['file_name'])
instance = {'filename': impath,
'object': [],
'width': image_info['width'],
'height': image_info['height']}
for j in range(len(ann['labels'])):
x1 = int(ann['bboxes'][j][0])
y1 = int(ann['bboxes'][j][1])
x2 = int(ann['bboxes'][j][2])
y2 = int(ann['bboxes'][j][3])
cls = val_coco.labels[ann['labels'][j]]
obj = {'xmin': x1, 'xmax': x2, 'ymin': y1, 'ymax': y2, 'name': cls}
instance['object'].append(obj)
if with_empty or len(instance['object']) > 0:
valid_ints.append(instance)
if len(instance['object']) > max_box_per_image:
max_box_per_image = len(instance['object'])
return train_ints, valid_ints, sorted(labels), max_box_per_image
def create_training_instances(train_annot_folder,
train_image_folder,
valid_annot_folder,
valid_image_folder,
labels,
):
# parse annotations of the training set
train_ints, train_labels = parse_annotation(train_annot_folder, train_image_folder, labels)
# parse annotations of the validation set, if any, otherwise split the training set
if os.path.exists(valid_annot_folder):
valid_ints, valid_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels)
else:
print("valid_annot_folder not exists. Spliting the trainining set.")
train_valid_split = int(0.8 * len(train_ints))
np.random.seed(0)
np.random.shuffle(train_ints)
np.random.seed()
valid_ints = train_ints[train_valid_split:]
train_ints = train_ints[:train_valid_split]
# compare the seen labels with the given labels in config.json
if len(labels) > 0:
overlap_labels = set(labels).intersection(set(train_labels.keys()))
print('Seen labels: \t' + str(train_labels) + '\n')
print('Given labels: \t' + str(labels))
# return None, None, None if some given label is not in the dataset
if len(overlap_labels) < len(labels):
            print('\033[33m\nThese labels have no images')
for label in labels:
if label not in overlap_labels:
print(label)
print('\033[0m')
labels = list(overlap_labels)
else:
print('No labels are provided. Train on all seen labels.')
# print(train_labels)
labels = train_labels.keys()
max_box_per_image = max([len(inst['object']) for inst in (train_ints + valid_ints)])
return train_ints, valid_ints, sorted(labels), max_box_per_image
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, c=None, classes=None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.c = c
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
class WeightReader:
def __init__(self, weight_file):
self.offset = 4
self.all_weights = np.fromfile(weight_file, dtype='float32')
def read_bytes(self, size):
self.offset = self.offset + size
return self.all_weights[self.offset - size:self.offset]
def reset(self):
self.offset = 4
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax - box1.xmin, box1.ymax - box1.ymin
w2, h2 = box2.xmax - box2.xmin, box2.ymax - box2.ymin
union = w1 * h1 + w2 * h2 - intersect
return float(intersect) / union
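# Hedged example (made-up coordinates): bbox_iou operates on the BoundBox
# objects defined above; two 10x10 boxes offset by 5 pixels intersect over
# 25 px against a union of 175 px.
#
#   bbox_iou(BoundBox(0, 0, 10, 10), BoundBox(5, 5, 15, 15))  # ~0.143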
def draw_boxes(image, boxes, labels):
image_h, image_w, _ = image.shape
color = [(0, 255, 0), (0, 255, 255), (255, 255, 0), (0, 0, 255), (255, 0, 255), (255, 0, 0)]
for box in boxes:
xmin = max(0, int(box.xmin * image_w))
ymin = max(0, int(box.ymin * image_h))
xmax = min(int(box.xmax * image_w), image_w)
ymax = min(int(box.ymax * image_h), image_h)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color[box.get_label() % 6], 3)
cv2.putText(image,
labels[box.get_label()] + ' ' + str(box.get_score()),
(xmin, ymin - 13),
cv2.FONT_HERSHEY_SIMPLEX,
1e-3 * image_h,
color[box.get_label() % 6], 1)
return image
def decode_netout(netout, anchors, nb_class, obj_threshold=0.3, nms_threshold=0.3):
grid_h, grid_w, nb_box = netout.shape[:3]
boxes = []
# decode the output by the network
netout[..., 4] = _sigmoid(netout[..., 4])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
netout[..., 5:] *= netout[..., 5:] > obj_threshold
for row in range(grid_h):
for col in range(grid_w):
for b in range(nb_box):
                # from the 4th element onwards are the confidence and class probabilities
classes = netout[row, col, b, 5:]
if np.sum(classes) > 0:
# first 4 elements are x, y, w, and h
x, y, w, h = netout[row, col, b, :4]
x = (col + _sigmoid(x)) / grid_w # center position, unit: image width
y = (row + _sigmoid(y)) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / grid_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / grid_h # unit: image height
confidence = netout[row, col, b, 4]
box = BoundBox(x - w / 2, y - h / 2, x + w / 2, y + h / 2, confidence, classes)
boxes.append(box)
# suppress non-maximal boxes
for c in range(nb_class):
sorted_indices = list(reversed(np.argsort([box.classes[c] for box in boxes])))
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0:
continue
else:
for j in range(i + 1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_threshold:
boxes[index_j].classes[c] = 0
# remove the boxes which are less likely than a obj_threshold
boxes = [box for box in boxes if box.get_score() > obj_threshold]
return boxes
def decode_netoutv3(netout, anchors, obj_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4] = _sigmoid(netout[..., 4])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h * grid_w):
row = i // grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[row, col, b, 4]
if objectness <= obj_thresh:
continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[row, col, b, :4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[row, col, b, 5:]
box = BoundBox(x - w / 2, y - h / 2, x + w / 2, y + h / 2, objectness, classes)
boxes.append(box)
return boxes
def compute_overlap(a, b):
"""
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
Parameters
----------
a: (N, 4) ndarray of float
b: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])
ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])
iw = np.maximum(iw, 0)
ih = np.maximum(ih, 0)
ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih
ua = np.maximum(ua, np.finfo(float).eps)
intersection = iw * ih
return intersection / ua
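# Hedged example (illustrative arrays): compute_overlap is vectorised, mapping
# (N, 4) detections and (K, 4) annotations to an (N, K) IoU matrix.
#
#   dets = np.array([[0., 0., 10., 10.]])
#   anns = np.array([[0., 0., 10., 10.], [20., 20., 30., 30.]])
#   compute_overlap(dets, anns)  # -> array([[1., 0.]])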
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
        The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
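# Worked example (hedged, illustrative numbers): with one annotation and two
# detections, AP is 1.0 when the true positive is ranked first and 0.5 when it
# is ranked second, matching the precision-envelope computation above.
#
#   compute_ap(np.array([1., 1.]), np.array([1., 0.5]))  # -> 1.0
#   compute_ap(np.array([0., 1.]), np.array([0., 0.5]))  # -> 0.5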
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2, x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2, x4) - x3
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _softmax(x, axis=-1, t=-100.):
x = x - np.max(x)
if np.min(x) < t:
x = x / np.min(x) * t
e_x = np.exp(x)
return e_x / e_x.sum(axis, keepdims=True)
def _rand_scale(scale):
scale = np.random.uniform(1, scale)
return scale if (np.random.randint(2) == 0) else 1. / scale
def _constrain(min_v, max_v, value):
if value < min_v:
return min_v
if value > max_v:
return max_v
return value
def random_flip(image, flip):
if flip == 1:
return cv2.flip(image, 1)
return image
def correct_bounding_boxes(boxes, new_w, new_h, net_w, net_h, dx, dy, flip, image_w, image_h):
boxes = copy.deepcopy(boxes)
# randomize boxes' order
np.random.shuffle(boxes)
# correct sizes and positions
sx, sy = float(new_w) / image_w, float(new_h) / image_h
zero_boxes = []
for i in range(len(boxes)):
boxes[i]['xmin'] = int(_constrain(0, net_w, boxes[i]['xmin'] * sx + dx))
boxes[i]['xmax'] = int(_constrain(0, net_w, boxes[i]['xmax'] * sx + dx))
boxes[i]['ymin'] = int(_constrain(0, net_h, boxes[i]['ymin'] * sy + dy))
boxes[i]['ymax'] = int(_constrain(0, net_h, boxes[i]['ymax'] * sy + dy))
if boxes[i]['xmax'] <= boxes[i]['xmin'] or boxes[i]['ymax'] <= boxes[i]['ymin']:
zero_boxes += [i]
continue
if flip == 1:
swap = boxes[i]['xmin']
boxes[i]['xmin'] = net_w - boxes[i]['xmax']
boxes[i]['xmax'] = net_w - swap
boxes = [boxes[i] for i in range(len(boxes)) if i not in zero_boxes]
return boxes
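# Hedged sketch of the coordinate arithmetic above (values are illustrative):
# a box on a 200x200 image resized to 100x100 and shifted by dx = dy = 10 is
# scaled by 0.5 and translated by 10 on each axis (no flip).
#
#   box = [{'xmin': 40, 'ymin': 40, 'xmax': 80, 'ymax': 80, 'name': 'obj'}]
#   correct_bounding_boxes(box, 100, 100, 120, 120, 10, 10, 0, 200, 200)
#   # -> [{'xmin': 30, 'ymin': 30, 'xmax': 50, 'ymax': 50, 'name': 'obj'}]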
def random_distort_image(image, hue=18, saturation=1.5, exposure=1.5):
# determine scale factors
dhue = np.random.uniform(-hue, hue)
dsat = _rand_scale(saturation)
dexp = _rand_scale(exposure)
# convert RGB space to HSV space
image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV).astype('float')
    # change saturation and exposure
image[:, :, 1] *= dsat
image[:, :, 2] *= dexp
# change hue
image[:, :, 0] += dhue
image[:, :, 0] -= (image[:, :, 0] > 180) * 180
image[:, :, 0] += (image[:, :, 0] < 0) * 180
# convert back to RGB from HSV
return cv2.cvtColor(image.astype('uint8'), cv2.COLOR_HSV2RGB)
def apply_random_scale_and_crop(image, new_w, new_h, net_w, net_h, dx, dy):
try:
im_sized = cv2.resize(image, (new_w, new_h))
except cv2.error as e:
        print('cv2.resize failed for target size (w, h):', new_w, new_h)
raise cv2.error('{}, {} {}'.format(new_w, new_h, e.__cause__))
if dx > 0:
im_sized = np.pad(im_sized, ((0, 0), (dx, 0), (0, 0)), mode='constant', constant_values=127)
else:
im_sized = im_sized[:, -dx:, :]
if (new_w + dx) < net_w:
im_sized = np.pad(im_sized, ((0, 0), (0, net_w - (new_w + dx)), (0, 0)), mode='constant', constant_values=127)
if dy > 0:
im_sized = np.pad(im_sized, ((dy, 0), (0, 0), (0, 0)), mode='constant', constant_values=127)
else:
im_sized = im_sized[-dy:, :, :]
if (new_h + dy) < net_h:
im_sized = np.pad(im_sized, ((0, net_h - (new_h + dy)), (0, 0), (0, 0)), mode='constant', constant_values=127)
return im_sized[:net_h, :net_w, :]
def makedirs(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def label_to_coco_label(label):
return {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 11, 11: 13, 12: 14, 13: 15, 14: 16, 15: 17,
16: 18, 17: 19, 18: 20, 19: 21, 20: 22, 21: 23, 22: 24, 23: 25, 24: 27, 25: 28, 26: 31, 27: 32, 28: 33,
29: 34, 30: 35, 31: 36, 32: 37, 33: 38, 34: 39, 35: 40, 36: 41, 37: 42, 38: 43, 39: 44, 40: 46, 41: 47,
42: 48, 43: 49, 44: 50, 45: 51, 46: 52, 47: 53, 48: 54, 49: 55, 50: 56, 51: 57, 52: 58, 53: 59, 54: 60,
55: 61, 56: 62, 57: 63, 58: 64, 59: 65, 60: 67, 61: 70, 62: 72, 63: 73, 64: 74, 65: 75, 66: 76, 67: 77,
68: 78, 69: 79, 70: 80, 71: 81, 72: 82, 73: 84, 74: 85, 75: 86, 76: 87, 77: 88, 78: 89, 79: 90}[label]
def coco_label_to_label(coco_label):
dictionary = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 11, 11: 13, 12: 14, 13: 15, 14: 16,
15: 17, 16: 18, 17: 19, 18: 20, 19: 21, 20: 22, 21: 23, 22: 24, 23: 25, 24: 27, 25: 28, 26: 31,
27: 32, 28: 33, 29: 34, 30: 35, 31: 36, 32: 37, 33: 38, 34: 39, 35: 40, 36: 41, 37: 42, 38: 43,
39: 44, 40: 46, 41: 47, 42: 48, 43: 49, 44: 50, 45: 51, 46: 52, 47: 53, 48: 54, 49: 55, 50: 56,
51: 57, 52: 58, 53: 59, 54: 60, 55: 61, 56: 62, 57: 63, 58: 64, 59: 65, 60: 67, 61: 70, 62: 72,
63: 73, 64: 74, 65: 75, 66: 76, 67: 77, 68: 78, 69: 79, 70: 80, 71: 81, 72: 82, 73: 84, 74: 85,
75: 86, 76: 87, 77: 88, 78: 89, 79: 90}
for label, d_coco_label in dictionary.items(): # for name, age in dictionary.iteritems(): (for Python 2.x)
if d_coco_label == coco_label:
return label
return -1
def boundbox2cocobox(boxes, scale):
"""
:param scale:
:param boxes: [Bndbox(), Bndbox(),...]
:return: boxes: [[x, y, w, h]]
scores: float
labels: int
"""
cocoboxes = []
scores = []
labels = []
for bbox in boxes:
cocoboxes.append([bbox.xmin / scale,
bbox.ymin / scale,
(bbox.xmax - bbox.xmin) / scale,
(bbox.ymax - bbox.ymin) / scale])
scores.append(bbox.get_score())
labels.append(bbox.get_label())
assert len(cocoboxes) == len(scores) == len(labels)
return cocoboxes, scores, labels
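# Hedged example (made-up values): a BoundBox predicted on an image resized
# with scale 0.5 maps back to original-resolution COCO [x, y, w, h] like so.
#
#   bb = BoundBox(10, 20, 30, 60, c=0.9, classes=np.array([0.1, 0.9]))
#   boundbox2cocobox([bb], 0.5)
#   # -> ([[20.0, 40.0, 40.0, 80.0]], [0.9], [1])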
def compute_resize_scale(image_shape, min_side=800, max_side=1333):
""" Compute an image scale such that the image size is constrained to min_side and max_side.
Args
min_side: The image's min side will be equal to min_side after resizing.
max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side.
Returns
A resizing scale.
"""
(rows, cols, _) = image_shape
smallest_side = min(rows, cols)
# rescale the image so the smallest side is min_side
scale = min_side / smallest_side
# check if the largest side is now greater than max_side, which can happen
# when images have a large aspect ratio
largest_side = max(rows, cols)
if largest_side * scale > max_side:
scale = max_side / largest_side
return scale
def resize_image(img, min_side=800, max_side=1333):
""" Resize an image such that the size is constrained to min_side and max_side.
Args
min_side: The image's min side will be equal to min_side after resizing.
max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side.
Returns
A resized image.
"""
# compute scale to resize the image
scale = compute_resize_scale(img.shape, min_side=min_side, max_side=max_side)
# resize the image with the computed scale
img = cv2.resize(img, None, fx=scale, fy=scale)
return img, scale
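# Hedged usage note (made-up sizes): the returned scale is what detections on
# the resized image are divided by to map them back to the original resolution,
# which is how boundbox2cocobox above consumes it.
#
#   img = np.zeros((480, 640, 3), dtype=np.uint8)
#   resized, scale = resize_image(img, min_side=360, max_side=448)
#   # scale == 0.7, resized.shape == (336, 448, 3)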
# noinspection PyTypeChecker
def evaluate_coco(generator, model, anchors, json_path, imsize=448, threshold=0.5):
""" Use the pycocotools to evaluate a COCO model on a dataset.
Args
generator : The generator for generating the evaluation data.
model : The model to evaluate.
threshold : The score threshold to use.
"""
# start collecting results
import pickle
if os.path.exists('coco_eval_temp.pk'):
results, image_ids = pickle.load(open('coco_eval_temp.pk', 'rb'))
else:
results = []
image_ids = []
for index in range(generator.size()):
# if index % 50 == 0:
# print()
print(index, end='\r')
image = generator.load_image(index)
image, scale = resize_image(image, 360, imsize)
image = np.expand_dims(image, 0)
boxes = get_yolo_boxes(model,
image,
imsize, imsize,
anchors,
0.5,
0.5,
preprocess=True)[0]
boxes, scores, labels = boundbox2cocobox(boxes, scale)
# assert len(boxes) > 0
# compute predicted labels and scores
image_id = int(os.path.split(generator.instances[index]['filename'])[-1][:-4])
for box, score, label in zip(boxes, scores, labels):
# scores are sorted, so we can break
if score < threshold:
break
# append detection for each positively labeled class
image_result = {
'image_id': image_id,
'category_id': label_to_coco_label(label), # todo:
'score': float(score),
'bbox': box,
}
# append detection to results
results.append(image_result)
# append image to list of processed images
image_ids.append(image_id)
with open('coco_eval_temp.pk', 'wb') as wr:
pickle.dump([results, image_ids], wr)
if not len(results):
return
import json
# write output
json.dump(results, open('{}_bbox_results.json'.format('val2017'), 'w'), indent=4)
json.dump(image_ids, open('{}_processed_image_ids.json'.format('val2017'), 'w'), indent=4)
# load results in COCO evaluation tool
coco_true = COCO(json_path)
coco_pred = coco_true.loadRes('{}_bbox_results.json'.format('val2017'))
# run COCO evaluation
coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
coco_eval.params.imgIds = image_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval.stats
# noinspection PyTypeChecker
def evaluate(model,
generator,
iou_threshold=0.5,
obj_thresh=0.5,
nms_thresh=0.45,
net_h=416,
net_w=416,
save_path=None):
""" Evaluate a given dataset using a given model.
code originally from https://github.com/fizyr/keras-retinanet
# Arguments
model : The model to evaluate.
generator : The generator that represents the dataset to evaluate.
        iou_threshold : The threshold used to decide whether a detection is counted as positive or negative.
obj_thresh : The threshold used to distinguish between object and non-object
nms_thresh : The threshold used to determine whether two detections are duplicates
net_h : The height of the input image to the model, higher value results in better accuracy
net_w : The width of the input image to the model
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
"""
# gather all detections and annotations
all_detections = [[None for _ in range(generator.num_classes())] for _ in range(generator.size())]
all_annotations = [[None for _ in range(generator.num_classes())] for _ in range(generator.size())]
for i in range(generator.size()):
print(i, end='\r')
raw_image = [generator.load_image(i)]
# make the boxes and the labels
pred_boxes = get_yolo_boxes(model, raw_image, net_h, net_w, generator.get_anchors(), obj_thresh, nms_thresh)[0]
score = np.array([box.get_score() for box in pred_boxes])
pred_labels = np.array([box.label for box in pred_boxes])
if len(pred_boxes) > 0:
pred_boxes = np.array([[box.xmin, box.ymin, box.xmax, box.ymax, box.get_score()] for box in pred_boxes])
else:
pred_boxes = np.array([[]])
# sort the boxes and the labels according to scores
score_sort = np.argsort(-score)
pred_labels = pred_labels[score_sort]
pred_boxes = pred_boxes[score_sort]
# copy detections to all_detections
for label in range(generator.num_classes()):
all_detections[i][label] = pred_boxes[pred_labels == label, :]
annotations = generator.load_annotation(i)
# copy detections to all_annotations
for label in range(generator.num_classes()):
try:
all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
except IndexError:
pass
# compute mAP by comparing all detections and all annotations
average_precisions = {}
for label in range(generator.num_classes()):
print()
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(generator.size()):
print(i, end='\r')
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
# no annotations -> AP for this class is 0 (is this correct?)
if num_annotations == 0:
average_precisions[label] = 0
continue
# sort by score
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# compute average precision
average_precision = compute_ap(recall, precision)
average_precisions[label] = average_precision
return average_precisions
# noinspection PyTypeChecker
def evaluate_acc(model,
generator,
iou_threshold=0.5,
obj_thresh=0.5,
nms_thresh=0.45,
net_h=416,
net_w=416,
save_path=None):
""" Evaluate a given dataset using a given model.
code originally from https://github.com/fizyr/keras-retinanet
# Arguments
model : The model to evaluate.
generator : The generator that represents the dataset to evaluate.
iou_threshold : The threshold used to consider when a detection is positive or negative.
obj_thresh : The threshold used to distinguish between object and non-object
nms_thresh : The threshold used to determine whether two detections are duplicates
net_h : The height of the input image to the model, higher value results in better accuracy
net_w : The width of the input image to the model
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
"""
# gather all detections and annotations
all_detections = [[None for _ in range(generator.num_classes())] for _ in range(generator.size())]
all_annotations = [[None for _ in range(generator.num_classes())] for _ in range(generator.size())]
for i in range(generator.size()):
print(i, end='\r')
raw_image = [generator.load_image(i)]
# make the boxes and the labels
pred_boxes = get_yolo_boxes(model, raw_image, net_h, net_w, generator.get_anchors(), obj_thresh, nms_thresh)[0]
score = np.array([box.get_score() for box in pred_boxes])
pred_labels = np.array([box.label for box in pred_boxes])
if len(pred_boxes) > 0:
pred_boxes = np.array([[box.xmin, box.ymin, box.xmax, box.ymax, box.get_score()] for box in pred_boxes])
else:
pred_boxes = np.array([[]])
# sort the boxes and the labels according to scores
score_sort = np.argsort(-score)
pred_labels = pred_labels[score_sort]
pred_boxes = pred_boxes[score_sort]
# copy detections to all_detections
for label in range(generator.num_classes()):
all_detections[i][label] = pred_boxes[pred_labels == label, :]
annotations = generator.load_annotation(i)
# copy detections to all_annotations
for label in range(generator.num_classes()):
try:
all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
except IndexError:
pass
# compute mAP by comparing all detections and all annotations
average_precisions = {}
for label in range(generator.num_classes()):
print()
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(generator.size()):
print(i, end='\r')
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
# no annotations -> AP for this class is 0 (is this correct?)
if num_annotations == 0:
average_precisions[label] = 0
continue
# sort by score
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# compute average precision
average_precision = compute_ap(recall, precision)
average_precisions[label] = average_precision
return average_precisions
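# --- Added illustration: a minimal, hedged sketch of how evaluate_acc might be called. ---
# `model` and `valid_generator` are assumptions (a trained Keras model and a generator exposing
# the size()/num_classes()/load_image()/load_annotation()/get_anchors() interface used above).
def _example_evaluate_acc(model, valid_generator):
    # Per-class average precision at IoU 0.5, then a simple mAP over all classes.
    average_precisions = evaluate_acc(model,
                                      valid_generator,
                                      iou_threshold=0.5,
                                      obj_thresh=0.5,
                                      nms_thresh=0.45,
                                      net_h=416,
                                      net_w=416)
    mean_ap = sum(average_precisions.values()) / len(average_precisions)
    return average_precisions, mean_ap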
def normalize(image):
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
image = np.subtract(image.astype('float32'), MEAN_RGB)
image = np.divide(image, STDDEV_RGB)
    return image  # EfficientNet-style mean/std normalization, used instead of image/255.
def draw_boxesv3(image, boxes, labels, obj_thresh):
color = [(255, 0, 0), (0, 255, 0), (0, 0, 255),
(255, 0, 255), (255, 255, 0), (0, 255, 255),
(0, 0, 0), (255, 255, 255),
]
for box in boxes:
label_str = ''
label = -1
for i in range(len(labels)):
if box.classes[i] > obj_thresh:
label_str += labels[i]
label = i
# print(labels[i] + ': ' + str(box.classes[i] * 100) + '%')
if label >= 0:
            cv2.rectangle(image, (box.xmin, box.ymin), (box.xmax, box.ymax), color[box.get_label() % 8], 1)
cv2.putText(image,
label_str + ' ' + str(box.get_score()),
(box.xmin, box.ymin - 13),
cv2.FONT_HERSHEY_SIMPLEX,
1e-3 * image.shape[0],
color[box.get_label() % 8], 1)
return image
def preprocess_input(image, net_h, net_w):
new_h, new_w, _ = image.shape
# determine the new size of the image
if (float(net_w) / new_w) < (float(net_h) / new_h):
new_h = (new_h * net_w) // new_w
new_w = net_w
else:
new_w = (new_w * net_h) // new_h
new_h = net_h
# resize the image to the new size
resized = cv2.resize(normalize(image[:, :, ::-1]), (new_w, new_h))
# embed the image into the standard letter box
new_image = np.ones((net_h, net_w, 3)) * 0.5
new_image[(net_h - new_h) // 2:(net_h + new_h) // 2, (net_w - new_w) // 2:(net_w + new_w) // 2, :] = resized
new_image = np.expand_dims(new_image, 0)
return new_image
def get_yolo_boxes(model, images, net_h, net_w, anchors, obj_thresh, nms_thresh, preprocess=True):
image_h, image_w, _ = images[0].shape
nb_images = len(images)
batch_input = np.zeros((nb_images, net_h, net_w, 3))
# preprocess the input
if preprocess:
for i in range(nb_images):
batch_input[i] = preprocess_input(images[i], net_h, net_w)
# run the prediction
batch_output = model.predict_on_batch(batch_input)
batch_boxes = [None] * nb_images
for i in range(nb_images):
yolos = [batch_output[0][i], batch_output[1][i], batch_output[2][i]]
boxes = []
# decode the output of the network
for j in range(len(yolos)):
yolo_anchors = anchors[(2 - j) * 6:(3 - j) * 6] # config['model']['anchors']
boxes += decode_netoutv3(yolos[j], yolo_anchors, obj_thresh, net_h, net_w)
# correct the sizes of the bounding boxes
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
# suppress non-maximal boxes
do_nms(boxes, nms_thresh)
batch_boxes[i] = boxes
return batch_boxes
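# --- Added illustration: a hedged single-image inference sketch tying the helpers together. ---
# `infer_model`, `image_bgr`, `anchors` and `labels` are assumptions (a trained Keras model,
# a BGR image as loaded by cv2, the anchor values and the class-name list).
def _example_detect_and_draw(infer_model, image_bgr, anchors, labels):
    # Detect objects in one image and draw the boxes that survive NMS onto it.
    boxes = get_yolo_boxes(infer_model, [image_bgr], 416, 416,
                           anchors, obj_thresh=0.5, nms_thresh=0.45)[0]
    return draw_boxesv3(image_bgr, boxes, labels, obj_thresh=0.5)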
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w) / image_w) < (float(net_h) / image_h):
new_w = net_w
new_h = (image_h * net_w) / image_w
else:
        new_h = net_h
new_w = (image_w * net_h) / image_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w) / 2. / net_w, float(new_w) / net_w
y_offset, y_scale = (net_h - new_h) / 2. / net_h, float(new_h) / net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0:
continue
for j in range(i + 1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
| [
"[email protected]"
]
| |
84f29f68b65af4c479188bad5fe13eb540caa362 | 7fac5e7216c8f4328f21a14f9e222005890a57f8 | /11_Actor_Critic_Advantage/refactor/CartPole/network.py | a91406a51ae6f89da620c63ac4298837c272d612 | []
| no_license | ZhangRui111/MorvanRL | bee77d644df50ce9900be6ec7d702c395238fae4 | ad443d56314427aa9ebe4af552dde0f5470da967 | refs/heads/master | 2021-04-14T12:05:14.657272 | 2019-03-26T02:28:31 | 2019-03-26T02:28:31 | 126,663,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | import numpy as np
import tensorflow as tf
def build_actor_network(n_features, n_actions, lr):
s = tf.placeholder(tf.float32, [1, n_features], "state")
a = tf.placeholder(tf.int32, None, "act")
td_error = tf.placeholder(tf.float32, None, "td_error") # TD_error
with tf.variable_scope('Actor'):
l1 = tf.contrib.layers.fully_connected(s, 20, activation_fn=tf.nn.relu)
acts_prob = tf.contrib.layers.fully_connected(l1, n_actions, activation_fn=tf.nn.softmax)
with tf.variable_scope('exp_v'):
log_prob = tf.log(acts_prob[0, a])
        # log_prob = tf.exp(acts_prob[0, a]) # tf.exp can also converge
exp_v = tf.reduce_mean(log_prob * td_error) # advantage (TD_error) guided loss
with tf.variable_scope('train'):
train_op = tf.train.AdamOptimizer(lr).minimize(-exp_v) # minimize(-exp_v) = maximize(exp_v)
return [[s, a, td_error], [acts_prob, exp_v, train_op]]
# # debug mode # #
# return [[s, a, td_error], [acts_prob, exp_v, train_op], [log_prob, l1]]
# # debug mode # #
def build_critic_network(n_features, lr, discount):
s = tf.placeholder(tf.float32, [1, n_features], "state")
v_ = tf.placeholder(tf.float32, [1, 1], "v_next")
r = tf.placeholder(tf.float32, None, 'r')
with tf.variable_scope('Critic'):
l1 = tf.contrib.layers.fully_connected(s, 20, activation_fn=tf.nn.relu)
v = tf.contrib.layers.fully_connected(l1, 1, activation_fn=None)
with tf.variable_scope('squared_TD_error'):
td_error = r + discount * v_ - v
loss = tf.square(td_error) # TD_error = (r+gamma*V_next) - V_eval
with tf.variable_scope('train'):
train_op = tf.train.AdamOptimizer(lr).minimize(loss)
return [[s, v_, r], [v, td_error, loss, train_op]]
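# --- Added illustration: a hedged sketch of one actor-critic update using the builders above. ---
# `sess` is an initialized tf.Session; s and s_ are states shaped [1, n_features], a is an int
# action, r a float reward. The unpacking mirrors the return values above; everything else here
# is an assumption for illustration only.
def _example_update_step(sess, actor_io, critic_io, s, a, r, s_):
    (c_s, c_v_, c_r), (v, td_error, _loss, critic_train) = critic_io
    (a_s, a_a, a_td), (_acts_prob, _exp_v, actor_train) = actor_io
    v_next = sess.run(v, {c_s: s_})                     # bootstrap value of the next state
    td, _ = sess.run([td_error, critic_train],
                     {c_s: s, c_v_: v_next, c_r: r})    # critic step
    sess.run(actor_train, {a_s: s, a_a: a, a_td: td})   # actor step guided by the TD error
    return td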
| [
"[email protected]"
]
| |
e0b1b9862bfdcbcd85808a3da492258a9d3be3b4 | 05148c0ea223cfc7ed9d16234ab3e6bb40885e9d | /Packages/matplotlib-2.2.2/examples/units/basic_units.py | be07f0c9fce57fe2c456d751270dfbe77b983ac9 | [
"MIT"
]
| permissive | NightKirie/NCKU_NLP_2018_industry3 | 9ee226e194287fd9088429f87c58c874e050a8b3 | 23ac13644b140587e23cfeffb114c7c6f46f17a2 | refs/heads/master | 2021-06-05T05:33:09.510647 | 2018-07-05T10:19:47 | 2018-07-05T10:19:47 | 133,680,341 | 1 | 4 | MIT | 2020-05-20T16:29:54 | 2018-05-16T14:43:38 | Python | UTF-8 | Python | false | false | 10,875 | py | """
===========
Basic Units
===========
"""
import six
import math
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
from matplotlib.axes import Axes
from matplotlib.cbook import iterable
class ProxyDelegate(object):
def __init__(self, fn_name, proxy_type):
self.proxy_type = proxy_type
self.fn_name = fn_name
def __get__(self, obj, objtype=None):
return self.proxy_type(self.fn_name, obj)
class TaggedValueMeta(type):
def __init__(cls, name, bases, dict):
for fn_name in cls._proxies:
try:
dummy = getattr(cls, fn_name)
except AttributeError:
setattr(cls, fn_name,
ProxyDelegate(fn_name, cls._proxies[fn_name]))
class PassThroughProxy(object):
def __init__(self, fn_name, obj):
self.fn_name = fn_name
self.target = obj.proxy_target
def __call__(self, *args):
fn = getattr(self.target, self.fn_name)
ret = fn(*args)
return ret
class ConvertArgsProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
for a in args:
try:
converted_args.append(a.convert_to(self.unit))
except AttributeError:
converted_args.append(TaggedValue(a, self.unit))
converted_args = tuple([c.get_value() for c in converted_args])
return PassThroughProxy.__call__(self, *converted_args)
class ConvertReturnProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
ret = PassThroughProxy.__call__(self, *args)
return (NotImplemented if ret is NotImplemented
else TaggedValue(ret, self.unit))
class ConvertAllProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
arg_units = [self.unit]
for a in args:
if hasattr(a, 'get_unit') and not hasattr(a, 'convert_to'):
# if this arg has a unit type but no conversion ability,
# this operation is prohibited
return NotImplemented
if hasattr(a, 'convert_to'):
try:
a = a.convert_to(self.unit)
                except Exception:
pass
arg_units.append(a.get_unit())
converted_args.append(a.get_value())
else:
converted_args.append(a)
if hasattr(a, 'get_unit'):
arg_units.append(a.get_unit())
else:
arg_units.append(None)
converted_args = tuple(converted_args)
ret = PassThroughProxy.__call__(self, *converted_args)
if ret is NotImplemented:
return NotImplemented
ret_unit = unit_resolver(self.fn_name, arg_units)
if ret_unit is NotImplemented:
return NotImplemented
return TaggedValue(ret, ret_unit)
class TaggedValue(six.with_metaclass(TaggedValueMeta)):
_proxies = {'__add__': ConvertAllProxy,
'__sub__': ConvertAllProxy,
'__mul__': ConvertAllProxy,
'__rmul__': ConvertAllProxy,
'__cmp__': ConvertAllProxy,
'__lt__': ConvertAllProxy,
'__gt__': ConvertAllProxy,
'__len__': PassThroughProxy}
def __new__(cls, value, unit):
# generate a new subclass for value
value_class = type(value)
try:
subcls = type('TaggedValue_of_%s' % (value_class.__name__),
tuple([cls, value_class]),
{})
if subcls not in units.registry:
units.registry[subcls] = basicConverter
return object.__new__(subcls)
except TypeError:
if cls not in units.registry:
units.registry[cls] = basicConverter
return object.__new__(cls)
def __init__(self, value, unit):
self.value = value
self.unit = unit
self.proxy_target = self.value
def __getattribute__(self, name):
if name.startswith('__'):
return object.__getattribute__(self, name)
variable = object.__getattribute__(self, 'value')
if hasattr(variable, name) and name not in self.__class__.__dict__:
return getattr(variable, name)
return object.__getattribute__(self, name)
def __array__(self, dtype=object):
return np.asarray(self.value).astype(dtype)
def __array_wrap__(self, array, context):
return TaggedValue(array, self.unit)
def __repr__(self):
return 'TaggedValue(' + repr(self.value) + ', ' + repr(self.unit) + ')'
def __str__(self):
return str(self.value) + ' in ' + str(self.unit)
def __len__(self):
return len(self.value)
def __iter__(self):
# Return a generator expression rather than use `yield`, so that
# TypeError is raised by iter(self) if appropriate when checking for
# iterability.
return (TaggedValue(inner, self.unit) for inner in self.value)
def get_compressed_copy(self, mask):
new_value = np.ma.masked_array(self.value, mask=mask).compressed()
return TaggedValue(new_value, self.unit)
def convert_to(self, unit):
if unit == self.unit or not unit:
return self
new_value = self.unit.convert_value_to(self.value, unit)
return TaggedValue(new_value, unit)
def get_value(self):
return self.value
def get_unit(self):
return self.unit
class BasicUnit(object):
def __init__(self, name, fullname=None):
self.name = name
if fullname is None:
fullname = name
self.fullname = fullname
self.conversions = dict()
def __repr__(self):
return 'BasicUnit(%s)' % self.name
def __str__(self):
return self.fullname
def __call__(self, value):
return TaggedValue(value, self)
def __mul__(self, rhs):
value = rhs
unit = self
if hasattr(rhs, 'get_unit'):
value = rhs.get_value()
unit = rhs.get_unit()
unit = unit_resolver('__mul__', (self, unit))
if unit is NotImplemented:
return NotImplemented
return TaggedValue(value, unit)
def __rmul__(self, lhs):
return self*lhs
def __array_wrap__(self, array, context):
return TaggedValue(array, self)
def __array__(self, t=None, context=None):
ret = np.array([1])
if t is not None:
return ret.astype(t)
else:
return ret
def add_conversion_factor(self, unit, factor):
def convert(x):
return x*factor
self.conversions[unit] = convert
def add_conversion_fn(self, unit, fn):
self.conversions[unit] = fn
def get_conversion_fn(self, unit):
return self.conversions[unit]
def convert_value_to(self, value, unit):
conversion_fn = self.conversions[unit]
ret = conversion_fn(value)
return ret
def get_unit(self):
return self
class UnitResolver(object):
def addition_rule(self, units):
for unit_1, unit_2 in zip(units[:-1], units[1:]):
if (unit_1 != unit_2):
return NotImplemented
return units[0]
def multiplication_rule(self, units):
non_null = [u for u in units if u]
if (len(non_null) > 1):
return NotImplemented
return non_null[0]
op_dict = {
'__mul__': multiplication_rule,
'__rmul__': multiplication_rule,
'__add__': addition_rule,
'__radd__': addition_rule,
'__sub__': addition_rule,
'__rsub__': addition_rule}
def __call__(self, operation, units):
if (operation not in self.op_dict):
return NotImplemented
return self.op_dict[operation](self, units)
unit_resolver = UnitResolver()
cm = BasicUnit('cm', 'centimeters')
inch = BasicUnit('inch', 'inches')
inch.add_conversion_factor(cm, 2.54)
cm.add_conversion_factor(inch, 1/2.54)
radians = BasicUnit('rad', 'radians')
degrees = BasicUnit('deg', 'degrees')
radians.add_conversion_factor(degrees, 180.0/np.pi)
degrees.add_conversion_factor(radians, np.pi/180.0)
secs = BasicUnit('s', 'seconds')
hertz = BasicUnit('Hz', 'Hertz')
minutes = BasicUnit('min', 'minutes')
secs.add_conversion_fn(hertz, lambda x: 1./x)
secs.add_conversion_factor(minutes, 1/60.0)
# radians formatting
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n % 2 == 0:
return r'$%s\pi$' % (n//2,)
else:
return r'$%s\pi/2$' % (n,)
class BasicUnitConverter(units.ConversionInterface):
@staticmethod
def axisinfo(unit, axis):
'return AxisInfo instance for x and unit'
if unit == radians:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.fullname,
)
elif unit == degrees:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter(r'$%i^\circ$'),
label=unit.fullname,
)
elif unit is not None:
if hasattr(unit, 'fullname'):
return units.AxisInfo(label=unit.fullname)
elif hasattr(unit, 'unit'):
return units.AxisInfo(label=unit.unit.fullname)
return None
@staticmethod
def convert(val, unit, axis):
if units.ConversionInterface.is_numlike(val):
return val
if iterable(val):
return [thisval.convert_to(unit).get_value() for thisval in val]
else:
return val.convert_to(unit).get_value()
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
if iterable(x):
for thisx in x:
return thisx.unit
return x.unit
def cos(x):
if iterable(x):
return [math.cos(val.convert_to(radians).get_value()) for val in x]
else:
return math.cos(x.convert_to(radians).get_value())
basicConverter = BasicUnitConverter()
units.registry[BasicUnit] = basicConverter
units.registry[TaggedValue] = basicConverter
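# --- Added illustration: a hedged sketch of plotting with these units (requires pyplot); ---
# --- it mirrors how the other matplotlib unit examples import and use this module. ---
def _example_plot_in_centimeters():
    import matplotlib.pyplot as plt
    cms = cm * np.arange(0, 10, 2)   # TaggedValue array carrying the centimeters unit
    fig, ax = plt.subplots()
    ax.plot(cms, cms)                # the registered converter labels both axes
    return fig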
| [
"[email protected]"
]
| |
5db9f1bb82aaada88a79243dab0be796299f41e9 | a1d8fefb84ce2f69ebce5fedcdf5262ba0005a5f | /zvt/recorders/eastmoney/meta/china_stock_category_recorder.py | a0a6b9c47e284491ad80a9fade92202ba428d1f4 | [
"MIT"
]
| permissive | vinhphu3000/FinanceCenter | f2c827ffe268421011682ed45375f55ac6ddc54a | 1511751fe6d7d1f1fb940ae66d29b45eb0782fea | refs/heads/master | 2023-03-30T19:55:17.124679 | 2021-03-27T11:40:18 | 2021-03-27T11:40:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,630 | py | # -*- coding: utf-8 -*-
import pandas as pd
from numba import njit
from zvt import zvt_config
from zvt.api.data_type import Region, Provider, EntityType
from zvt.api.quote import china_stock_code_to_id
from zvt.domain import BlockStock, BlockCategory, Block
from zvt.contract.api import df_to_db
from zvt.contract.recorder import RecorderForEntities, TimeSeriesDataRecorder
from zvt.networking.request import sync_get
from zvt.utils.time_utils import now_pd_timestamp, PD_TIME_FORMAT_DAY
from zvt.utils.utils import json_callback_param
class EastmoneyChinaBlockRecorder(RecorderForEntities):
provider = Provider.EastMoney
data_schema = Block
region = Region.CHN
    # Used to fetch the industry / concept / region block lists
category_map_url = {
BlockCategory.industry: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKHY&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=200&lvl=&cb=jsonp_F1A61014DE5E45B7A50068EA290BC918&token=4f1862fc3b5e77c150a2b985b12db0fd&_=08766',
BlockCategory.concept: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKGN&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=300&lvl=&cb=jsonp_3071689CC1E6486A80027D69E8B33F26&token=4f1862fc3b5e77c150a2b985b12db0fd&_=08251',
# BlockCategory.area: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKDY&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=200&lvl=&cb=jsonp_A597D4867B3D4659A203AADE5B3B3AD5&token=4f1862fc3b5e77c150a2b985b12db0fd&_=02443'
}
def init_entities(self):
self.entities = [BlockCategory.industry, BlockCategory.concept]
def process_loop(self, entity, http_session):
text = sync_get(http_session, self.category_map_url[entity], return_type='text')
if text is None:
return
results = json_callback_param(text)
        # @njit(nopython=True)  # left disabled: numba nopython mode cannot compile this list-of-dict builder
def numba_boost_up(results):
the_list = []
for result in results:
items = result.split(',')
code = items[1]
name = items[2]
entity_id = f'block_cn_{code}'
the_list.append({
'id': entity_id,
'entity_id': entity_id,
'entity_type': EntityType.Block.value,
'exchange': 'cn',
'code': code,
'name': name,
'category': entity.value
})
return the_list
the_list = numba_boost_up(results)
if the_list:
df = pd.DataFrame.from_records(the_list)
df_to_db(df=df, ref_df=None, region=Region.CHN, data_schema=self.data_schema, provider=self.provider)
self.logger.info(f"finish record sina blocks:{entity.value}")
class EastmoneyChinaBlockStockRecorder(TimeSeriesDataRecorder):
region = Region.CHN
provider = Provider.EastMoney
entity_schema = Block
data_schema = BlockStock
    # Used to fetch the stocks that belong to a block
category_stocks_url = 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C.{}{}&sty=SFCOO&st=(Close)&sr=-1&p=1&ps=300&cb=jsonp_B66B5BAA1C1B47B5BB9778045845B947&token=7bc05d0d4c3c22ef9fca8c2a912d779c'
def __init__(self, exchanges=None, entity_ids=None, codes=None, batch_size=10, force_update=False, sleeping_time=5,
default_size=zvt_config['batch_size'], real_time=False, fix_duplicate_way='add',
start_timestamp=None, end_timestamp=None, close_hour=0, close_minute=0) -> None:
super().__init__(EntityType.Block, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
def generate_domain_id(self, entity, df, time_fmt=PD_TIME_FORMAT_DAY):
return entity.id + '_' + df['stock_id']
def record(self, entity, start, end, size, timestamps, http_session):
url = self.category_stocks_url.format(entity.code, '1')
text = sync_get(http_session, url, return_type='text')
if text is None:
return None
results = json_callback_param(text)
# @njit(nopython=True)
def numba_boost_up(results):
the_list = []
for result in results:
items = result.split(',')
stock_code = items[1]
stock_id = china_stock_code_to_id(stock_code)
the_list.append({
'stock_id': stock_id,
'stock_code': stock_code,
'stock_name': items[2],
})
return the_list
the_list = numba_boost_up(results)
if the_list:
df = pd.DataFrame.from_records(the_list)
return df
self.sleep()
return None
def format(self, entity, df):
df['timestamp'] = now_pd_timestamp(Region.CHN)
df['entity_id'] = entity.id
df['provider'] = self.provider.value
df['code'] = entity.code
df['name'] = entity.name
df['level'] = self.level.value
df['exchange'] = entity.exchange
df['entity_type'] = EntityType.Block.value
df['id'] = self.generate_domain_id(entity, df)
return df
__all__ = ['EastmoneyChinaBlockRecorder', 'EastmoneyChinaBlockStockRecorder']
if __name__ == '__main__':
# init_log('china_stock_category.log')
recorder = EastmoneyChinaBlockStockRecorder(codes=['BK0727'])
recorder.run()
| [
"[email protected]"
]
| |
a25196a8f29cc48a0abcab0af5d74810790319c3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/470/usersdata/281/112524/submittedfiles/Av2_Parte3.py | a4b0c34470239c11ef1a33686d04422e6413ad37 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
m=int(input('Digite a quantidade de listas desejada: '))
for i in range(0,m,1):
lista=[]
n=int(input('Digite a quantidade de elementos da %d lista: ' %(i+1)))
for i in range(0,n,1):
lista.append(int(input('Digite o %d elemento dessa lista: ' %(i+1))))
media=sum(lista)/len(lista)
    soma=0
    for i in range(0,n,1):
        soma=soma+(lista[i]-media)**2
    dp=((1/(n-1))*soma)**(1/2)
print(media)
print(dp)
| [
"[email protected]"
]
| |
aca102ba379f86d774530313c359be0ea25547c8 | 747f759311d404af31c0f80029e88098193f6269 | /extra-addons/hr_attendance_analysis/interface.py | 01271421ed420fa708a35f11eb536752ed1a9217 | []
| no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,712 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Clock Reader for OpenERP
# Copyright (C) 2004-2009 Moldeo Interactive CT
# (<http://www.moldeointeractive.com.ar>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import timeutils as tu
class Interface(object):
def __init__(self, cr, uid, pool, oid, otype):
self._parms = (cr, uid, pool)
self._cache = pool.get(otype).read(cr, uid, oid)
self._field = pool.get(otype).fields_get(cr, uid)
self._local_cache = {}
def __getitem__(self, name):
if name in self._local_cache:
return self._local_cache[name]
if name in self._cache:
ret = self._cache[name]
if isinstance(ret, bool): return ret
field = self._field[name]
if field['type'] in ['char','int','float', 'selection']:
_r = ret
elif field['type'] in ['datetime']:
_r = tu.dt(ret)
elif field['type'] in ['date']:
_r = tu.d(ret)
elif field['type'] in ['many2one']:
_r = Interface(*(self._parms + (ret[0] ,field['relation'])))
elif field['type'] in ['many2many', 'one2many']:
_r = map(lambda a: Interface(*(self._parms + a))
, zip(ret, [field['relation']]*len(ret)))
else:
raise NotImplementedError, \
"Not implemented for %s of type %s (%s)." % (name,
field['type'],
str(ret))
self._local_cache[name] = _r
return _r
else:
# raise ValueError, "Not exists %s in object." % name
return False
def __getattr__(self, name):
return self[name]
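# --- Added illustration: a hedged usage sketch; the model name, record id and field names ---
# --- ('hr.employee', id 1, 'name', 'user_id', 'login') are assumptions, not from this file. ---
def _example_read_employee(cr, uid, pool):
    emp = Interface(cr, uid, pool, 1, 'hr.employee')
    login = emp.user_id and emp.user_id.login   # many2one fields come back wrapped in Interface
    return emp.name, login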
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
]
| |
1648baed214078a8926589e49711518dd6f5a517 | 49f61714a6f78d984fd2194d6064d84e891bc5b7 | /2019-1/220/users/4258/codes/1647_2445.py | e74ad3e0112c554a5f274784d793ec04694fb134 | []
| no_license | psbarros/Variaveis3 | b5c4e1517e7d94a846ee03791d25d5821a1c651c | 3dcf6f810709ce03c78335acf9533e008a2ae125 | refs/heads/master | 2023-06-13T07:05:00.878430 | 2021-07-06T17:51:37 | 2021-07-06T17:51:37 | 383,549,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | escala = input("Escolha C para Celsius, ou F para Fahrenheit: ")
temp = float(input("Temperatura: "))
c = (5/9)*(temp - 32)
f = ((9/5)*temp) + 32
if(escala == "C"):
print(f)
if(escala == "F"):
print(c) | [
"[email protected]"
]
| |
21b8c9f44927459be125440bea1eff530f530da0 | 040236bf3bb45826c0bbc39e7432512ff420a0d1 | /geomk/api/serializers.py | 6c022a859e6e149bbc1d0f638e27c128eb57e92b | []
| no_license | ThiagoDiasV/parking-lot-api | 2768baf8921b9dc087616def8c93ccc4f2fe8cf5 | 5cb3f687099bea59740b0034aeebf9a65b791358 | refs/heads/master | 2022-12-13T02:25:50.754524 | 2020-02-12T12:08:32 | 2020-02-12T12:08:32 | 232,959,041 | 4 | 3 | null | 2022-03-08T21:10:08 | 2020-01-10T03:36:52 | Python | UTF-8 | Python | false | false | 991 | py | from .models import Car
from rest_framework import serializers
class CarSerializer(serializers.ModelSerializer):
class Meta:
model = Car
fields = "__all__"
read_only_fields = ["entry_time", "left_time", "time", "paid", "left"]
def create(self, validated_data: dict) -> Car:
"""
        Override create to reject a POST for a car that is already in the
        parking lot and has not left yet.
        A car with a registered plate can only enter again after it has left
        the previous time.
"""
try:
cars = Car.objects.filter(plate=validated_data.get("plate"))
last_register = cars.last()
if last_register:
if not last_register.left:
raise serializers.ValidationError(
"Car already at parking lot and don't left yet."
)
except IndexError:
pass
return Car.objects.create(**validated_data)
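# --- Added illustration: a hedged sketch of registering an entry through the serializer. ---
# Assumes `plate` is the only writable field needed here; adjust to the real Car model.
def _example_register_entry(plate="ABC1234"):
    serializer = CarSerializer(data={"plate": plate})
    serializer.is_valid(raise_exception=True)
    # save() calls create() above, which raises ValidationError if the same plate has not left yet
    return serializer.save()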
| [
"[email protected]"
]
| |
769afb7623da0289c6dc97015f9e4fa301f95254 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/mzm.py | 3ec62cfc159dc28f0f1be15728991241acff646f | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'mZM':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
27da08cfa78217f9a5c0fc73b6cccf72ff2e25ac | 69a2f0c4419d0bf39d2fe46e8ff2ee117eaf237a | /mutilprocess/test.py | 5b992fe9cd521106dc327da20aafd0555f827fc5 | []
| no_license | lxy5513/python | 7da339e8ef6e2fa827e2da723c0f4e3595e11e04 | 228c3e631e642228de659e68f98ea938bcb2509f | refs/heads/master | 2020-03-27T03:21:03.582842 | 2020-01-17T00:39:57 | 2020-01-17T00:39:57 | 145,855,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | import multiprocessing
import time
import collections
Queue = collections.deque(maxlen=10)
def consume(interval):
while True:
print("Queue: ", Queue)
if len(Queue) == 0:
print("no data")
time.sleep(0.5)
else:
num = Queue.pop()
print("Num: ", num)
time.sleep(0.5)
print("worker_1")
time.sleep(interval)
print("end worker_1")
def productor(interval):
while True:
print("productor")
time.sleep(interval)
Queue.append(1)
print("length of queue is: ", len(Queue))
print("end worker_2")
if __name__ == "__main__":
p1 = multiprocessing.Process(target = consume, args = (2,))
p2 = multiprocessing.Process(target = productor, args = (3,))
p1.start()
p2.start()
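# --- Added note: collections.deque is process-local, so `consume` and `productor` above each ---
# --- see their own copy of `Queue` and never actually exchange data across processes. ---
# --- A hedged sketch of a cross-process variant using multiprocessing.Queue (new names here): ---
def produce_mp(q, interval):
    while True:
        q.put(1)
        time.sleep(interval)
def consume_mp(q):
    while True:
        print("Num:", q.get())
def _example_shared_queue():
    q = multiprocessing.Queue(maxsize=10)
    multiprocessing.Process(target=produce_mp, args=(q, 3)).start()
    multiprocessing.Process(target=consume_mp, args=(q,)).start()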
| [
"[email protected]"
]
| |
883182e36ae3c57c73a7b281ee795b79299603a9 | 191fbcc96b9f0c74b88b001003f024064c973753 | /gateware/rtl/platform/syzygy/boson.py | 8ca2d666a8022c603a68c64631c4c2278825ce82 | [
"BSD-2-Clause"
]
| permissive | gregdavill/boson-eth-firmware | f0b5895469260e414b90cd7e7e0fad37a5728159 | 056843c43fac6486114bfb916fb78a4f7d38e87c | refs/heads/main | 2023-08-24T12:55:10.648741 | 2021-10-15T00:28:16 | 2021-10-15T00:28:16 | 374,504,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | # This file is Copyright (c) 2020 Gregory Davill <[email protected]>
# License: BSD
from litex.build.generic_platform import *
def boson_syzygy_r0d1(syzygy_id=0):
_id = f'SYZYGY{syzygy_id}'
return [
("Boson", 0,
Subsignal("data", Pins(f'{_id}:S27 {_id}:P2C_CLKN {_id}:D5P {_id}:S26 \
{_id}:D7N {_id}:D2P {_id}:D2N {_id}:S17 \
{_id}:D1N {_id}:S16 {_id}:D5N {_id}:S18 \
{_id}:C2P_CLKN {_id}:S25 {_id}:D1P {_id}:D6P \
{_id}:D4P {_id}:D0P {_id}:D6N {_id}:S23 \
{_id}:'),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("clk", Pins("A17"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("vsync", Pins("A13"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("hsync", Pins("D16"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("valid", Pins("C16"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("tx", Pins("A3"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("rx", Pins("B9"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("reset", Pins("B2"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("ext_sync", Pins("B18"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("rst_n", Pins("SYZYGY1:D5N"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
Subsignal("clk_p", Pins("SYZYGY1:D4P"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
Subsignal("clk_n", Pins("SYZYGY1:D4N"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
Subsignal("cs_n", Pins("SYZYGY1:D6P"), IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW TERMINATION=OFF")),
Subsignal("dq", Pins("SYZYGY1:D2N SYZYGY1:D0N SYZYGY1:D5P SYZYGY1:D2P SYZYGY1:D3P SYZYGY1:D1N SYZYGY1:D1P SYZYGY1:D0P"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
Subsignal("rwds", Pins("SYZYGY1:D3N"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
),
] | [
"[email protected]"
]
| |
00d0a14e123abd54a6e59a43184ae690361ef49d | acbb6e1e33cf2c5dae45c73e3d07723ce21f1cf9 | /migrations/versions/ad4630b5d9d4_followers.py | 6b6dd161cc165a486f3c1637ff7b444302d21143 | []
| no_license | Tur-4000/microblog | 24edde54599937bc97bf782861868fea0f57814e | 24de02ed7c1d417b68171079dc366833f7d2e6c7 | refs/heads/master | 2022-05-25T22:16:10.609591 | 2018-08-02T20:34:40 | 2018-08-02T20:34:40 | 141,682,858 | 1 | 0 | null | 2022-05-25T00:20:33 | 2018-07-20T08:05:00 | Python | UTF-8 | Python | false | false | 840 | py | """followers
Revision ID: ad4630b5d9d4
Revises: 6f99f9ee47c0
Create Date: 2018-07-24 17:35:58.696784
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ad4630b5d9d4'
down_revision = '6f99f9ee47c0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('followers',
sa.Column('follower_id', sa.Integer(), nullable=True),
sa.Column('followed_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['followed_id'], ['user.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['user.id'], )
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('followers')
# ### end Alembic commands ###
| [
"[email protected]"
]
| |
b54dbcbba5b87d5e208a4286878095d159ab7260 | 4f75cc33b4d65d5e4b054fc35b831a388a46c896 | /test_watchlist.py | 395cd36afd93c199d6f54cfb098279bd0d6044b4 | []
| no_license | Lr-2002/newpage | c3fe2acc451e24f6408996ea1271c61c321de702 | c589ad974e7100aa9b1c2ccc095a959ff68069b6 | refs/heads/main | 2023-09-03T06:13:53.428236 | 2021-11-23T10:41:21 | 2021-11-23T10:41:21 | 402,606,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,048 | py | from os import name
from re import A, T
import unittest
from app import app, db, Movie, User
class WatchlistTestCase(unittest.TestCase):
def setUp(self):
app.config.update(
TESTING = True,
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
)
db.create_all()
user = User(name = 'test', username = 'test')
user.set_password('123')
movie = Movie(title ='Test Movie Title', year = 2000)
db.session.add_all([user,movie])
db.session.commit()
self.client = app.test_client() # create client to test
self.runner = app.test_cli_runner()
        # app.test_cli_runner and app.test_client
        # are both built-in test helpers in Flask
def tearDown(self):
""" close app and clean everything"""
db.session.remove()
db.drop_all()
def test_app_exist(self):
""" exist_testing by none (if app not exist then the object is nono)"""
self.assertIsNotNone(app)
def test_app_is_testing(self):
""" test_app_is_testing by give app.config"""
self.assertTrue(app.config['TESTING'])
def test_404_page(self):
response = self.client.get('/nothing')
data = response.get_data(as_text=True)
self.assertIn('Page Not Found - 404',data)
# test the response of 404_page
self.assertEqual(response.status_code, 404)
def test_index_page(self):
response = self.client.get('/')
data = response.get_data(as_text=True)
self.assertEqual(response.status_code, 200)
def login(self):
self.client.post('/login', data=dict(
username = 'test',
password = '123'
),follow_redirects = True)
def test_create_item(self):
print(1)
self.login()
print(4)
response = self.client.post('/', data=dict(
title='New Movie',
year='2019'
), follow_redirects=True)
print(2)
data = response.get_data(as_text=True)
self.assertIn('Item created.', data)
self.assertIn('New Movie', data)
print(3)
        # Test creating an item with an empty movie title
response = self.client.post('/', data=dict(
title='',
year='2019'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item created.', data)
self.assertIn('Invalid input.', data)
        # Test creating an item with an empty movie year
response = self.client.post('/', data=dict(
title='New Movie',
year=''
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item created.', data)
self.assertIn('Invalid input.', data)
def test_update_item(self):
self.login()
        # Test the edit page
response = self.client.get('/movie/edit/1')
data = response.get_data(as_text=True)
self.assertIn('Edit', data)
self.assertIn('Test Movie Title', data)
self.assertIn('2000', data)
        # Test the update-item operation
response = self.client.post('/movie/edit/1', data=dict(
title='New Movie Edited',
year='2019'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Item updated.', data)
self.assertIn('New Movie Edited', data)
        # Test updating an item with an empty movie title
response = self.client.post('/movie/edit/1', data=dict(
title='',
year='2019'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item updated.', data)
self.assertIn('Invalid input.', data)
        # Test updating an item with an empty movie year
response = self.client.post('/movie/edit/1', data=dict(
title='New Movie Edited Again',
year=''
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item updated.', data)
self.assertNotIn('New Movie Edited Again', data)
self.assertIn('Invalid input.', data)
    # Test deleting an item
def test_delete_item(self):
self.login()
response = self.client.post('/movie/delete/1', follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Item Deleted', data)
self.assertNotIn('Test Movie Title', data)
def test_login_protect(self):
response = self.client.get('/')
data = response.get_data(as_text=True)
self.assertNotIn('Logout', data)
self.assertIn('Settings', data)
self.assertIn("<form method='post'>", data)
self.assertIn('Delete', data)
self.assertIn('Edit', data)
    # Test login
def test_login(self):
response = self.client.post('/login', data=dict(
username='test',
password='123'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Successfully', data)
self.assertIn('logout', data)
self.assertIn('Settings', data)
self.assertIn('Delete', data)
self.assertIn('Edit', data)
self.assertIn("<form method='post'>", data)
        # Test login with a wrong password
response = self.client.post('/login', data=dict(
username='test',
password='456'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Successfully', data)
self.assertIn('Invalid username or password', data)
        # Test login with a wrong username
response = self.client.post('/login', data=dict(
username='wrong',
password='123'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Successfully', data)
self.assertIn('Invalid username or password', data)
        # Test login with an empty username
response = self.client.post('/login', data=dict(
username='',
password='123'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Successfully', data)
self.assertIn('Invalid username or password', data)
        # Test login with an empty password
response = self.client.post('/login', data=dict(
username='test',
password=''
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Successfully', data)
self.assertIn('Invalid username or password', data)
    # Test logout
def test_logout(self):
self.login()
response = self.client.get('/logout', follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('logged out', data)
# self.assertIn('Logout', data)
self.assertIn('Settings', data)
self.assertIn('Delete', data)
self.assertIn('Edit', data)
self.assertIn("<form method='post'>", data)
    # Test settings
def test_settings(self):
self.login()
        # Test the settings page
response = self.client.get('/settings')
data = response.get_data(as_text=True)
self.assertIn('Settings', data)
self.assertIn('Your Name', data)
        # Test updating the settings
response = self.client.post('/settings', data=dict(
name='Grey Li',
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('changed', data)
self.assertIn('Grey Li', data)
        # Test updating the settings with an empty name
response = self.client.post('/settings', data=dict(
name='',
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('changed', data)
# self.assertIn('Invalid input.', data)
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
]
| |
9dedcdee6a2d68515c547bd4a1b13efe3b23bdce | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/dnspython-1.15.0/examples/ddns.py | f351524ee738290cfe64177208bb0df88bbff61f | [
"LicenseRef-scancode-warranty-disclaimer",
"ISC",
"Apache-2.0"
]
| permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 1,204 | py | #!/usr/bin/env python
#
# Use a TSIG-signed DDNS update to update our hostname-to-address
# mapping.
#
# usage: ddns.py <ip-address>
#
# On linux systems, you can automatically update your DNS any time an
# interface comes up by adding an ifup-local script that invokes this
# python code.
#
# E.g. on my systems I have this
#
# #!/bin/sh
#
# DEVICE=$1
#
# if [ "X${DEVICE}" == "Xeth0" ]; then
# IPADDR=`LANG= LC_ALL= ifconfig ${DEVICE} | grep 'inet addr' |
# awk -F: '{ print $2 } ' | awk '{ print $1 }'`
# /usr/local/sbin/ddns.py $IPADDR
# fi
#
# in /etc/ifup-local.
#
import sys
import dns.update
import dns.query
import dns.tsigkeyring
#
# Replace the keyname and secret with appropriate values for your
# configuration.
#
keyring = dns.tsigkeyring.from_text({
'keyname.' : 'NjHwPsMKjdN++dOfE5iAiQ=='
})
#
# Replace "example." with your domain, and "host" with your hostname.
#
update = dns.update.Update('example.', keyring=keyring)
update.replace('host', 300, 'A', sys.argv[1])
#
# Replace "10.0.0.1" with the IP address of your master server.
#
response = dns.query.tcp(update, '10.0.0.1', timeout=10)
| [
"[email protected]"
]
| |
499388b2165572001dc1138029488a7777cf7e8c | 45fdc51cf264bbd50e59655440eefc91451c50ea | /text/src/textwrap_dedent.py | 5084ccaedb083bd8a9ae1878d3c3217339c0efd4 | []
| no_license | blindij/python3_stl | 2163043f3a9113eac21a48a35685a4a01987e926 | ea138e25f8b5bbf7d8f78e4b1b7e2ae413de4735 | refs/heads/master | 2021-12-24T20:37:54.055116 | 2021-09-29T13:37:38 | 2021-09-29T13:37:38 | 191,508,648 | 0 | 0 | null | 2019-08-27T15:45:53 | 2019-06-12T06:10:30 | Python | UTF-8 | Python | false | false | 142 | py | import textwrap
from textwrap_example import sample_text
dedented_text = textwrap.dedent(sample_text)
print('Dedented')
print(dedented_text)
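# Added illustration: dedent only strips whitespace that is common to every line, e.g.
print(textwrap.dedent("    one\n    two\n        still indented"))
# -> "one\ntwo\n    still indented"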
| [
"[email protected]"
]
| |
2eb81e6bc89d77f0ee7640edaec9543348a8f465 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/315/101991/submittedfiles/minha_bib.py | 37dfe39276605bb65b991985c20942c3898a1b93 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,182 | py | # -*- coding: utf-8 -*-
import random
# Symbol the player wants to use
def solicitaSimbolodoHumano(a):
a = input('\nQual símbolo você deseja utilizar no jogo? ')
while a!='O' and a!='X' and a!='o' and a!='x':
a = input('\nQual símbolo você deseja utilizar no jogo? ')
if a == 'x' or a =='X':
a = ' X '
else:
a = ' O '
return a
# Draw to decide who plays first
def sorteioPrimeiraJogada(jogador, nome):
jogador = random.choice((0,1))
if jogador ==1:
print('\nVencedor do sorteio para inicio do jogo : %s'%nome)
else:
print('\nVencedor do sorteio para inicio do jogo : Computador')
return jogador
# Prints the board
def mostraTabuleiro(tabuleiro):
print ('')
print (tabuleiro[0][0] + '|' + tabuleiro[0][1] + '|' + tabuleiro[0][2])
print (tabuleiro[1][0] + '|' + tabuleiro[1][1] + '|' + tabuleiro[1][2])
print (tabuleiro[2][0] + '|' + tabuleiro[2][1] + '|' + tabuleiro[2][2])
# Computer move (original note: HAS A BUG)
def JogadaComputador(smbPC,tabuleiro):
while True:
ti = random.choice((0,1,2))
tj = random.choice((0,1,2))
if tabuleiro[ti][tj] == ' ':
break
else:
ti = random.choice((0,1,2))
tj = random.choice((0,1,2))
tabuleiro[ti][tj] = smbPC
return tabuleiro
# Checks whether the move is valid
def validaJogada(a, tabuleiro, nome):
while True:
if tabuleiro[int(a[0])][int(a[2])] == (' '):
break
else:
print('\nOPS!!! Essa jogada não está disponível. Tente novamente!')
a = input('\nQual a sua jogada, %s? ' %nome)
return a
# Human player's move
def JogadaHumana(smbH,tabuleiro, nome):
mostraTabuleiro(tabuleiro)
n = input('\nQual a sua jogada, %s? ' %nome)
n = validaJogada(n, tabuleiro, nome)
tabuleiro[int(n[0])][int(n[2])] = smbH
return tabuleiro
# Checks whether someone has won
def verificaVencedor(simbolo, tabuleiro):
if tabuleiro[0][0] == simbolo and tabuleiro[0][1] == simbolo and tabuleiro[0][2] == simbolo:
return True
elif tabuleiro[1][0] == simbolo and tabuleiro[1][1] == simbolo and tabuleiro[1][2] == simbolo:
return True
elif tabuleiro[2][0] == simbolo and tabuleiro[2][1] == simbolo and tabuleiro[2][2] == simbolo:
return True
elif tabuleiro[0][0] == simbolo and tabuleiro[1][0] == simbolo and tabuleiro[2][0] == simbolo:
return True
    elif tabuleiro[0][1] == simbolo and tabuleiro[1][1] == simbolo and tabuleiro[2][1] == simbolo:
return True
    elif tabuleiro[0][2] == simbolo and tabuleiro[1][2] == simbolo and tabuleiro[2][2] == simbolo:
return True
elif tabuleiro[0][0] == simbolo and tabuleiro[1][1] == simbolo and tabuleiro[2][2] == simbolo:
return True
elif tabuleiro[0][2] == simbolo and tabuleiro[1][1] == simbolo and tabuleiro[2][0] == simbolo:
return True
    elif all(' ' not in linha for linha in tabuleiro):
print('Velha')
| [
"[email protected]"
]
| |
80de93ce6ac31685b8012386a62622a1db6f1fc7 | aa9297175621fcd499cad5a0373aaad15f33cde8 | /impractical_py_projects/04/null_cipher_finder.py | 217c00b8e15b8e5e7cc33e404b729d1f1166c3ca | []
| no_license | eflipe/python-exercises | a64e88affe8f9deb34e8aa29a23a68c25e7ba08a | b7a429f57a5e4c5dda7c77db5721ca66a401d0a3 | refs/heads/master | 2023-04-26T19:19:28.674350 | 2022-07-19T20:53:09 | 2022-07-19T20:53:09 | 192,589,885 | 0 | 0 | null | 2023-04-21T21:23:14 | 2019-06-18T18:06:14 | HTML | UTF-8 | Python | false | false | 1,433 | py | import sys
import string
def load_text(file):
"""Load a text file as a string"""
with open(file) as f:
file = f.read().strip()
return file
def sole_null_cipher(message, lookahead):
for i in range(1, lookahead+1):
plaintext = ''
count = 0
found_first = False
for char in message:
if char in string.punctuation:
count = 0
found_first = True
elif found_first is True:
count += 1
if count == i:
plaintext += char
print("Using offset of {} after punctuation = {}".format(i, plaintext))
print()
def main():
filename = input("\nIngresa el mensaje: ")
try:
loaded_message = load_text(filename)
except IOError as e:
print(f'{e}. Error!')
sys.exit(1)
print("\nMensaje =")
print("{}".format(loaded_message), "\n")
print("\nList of punctuation marks to check = {}".format(string.punctuation))
message = ''.join(loaded_message.split())
while True:
lookahead = input("\nLetras a checkear después de" \
"un signo de puntuación: ")
if lookahead.isdigit():
lookahead = int(lookahead)
break
else:
print("Pls, ingresa un número")
print()
sole_null_cipher(message, lookahead)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
14378df2d496adc2ab62a597cefb735979db3c8d | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /scraper/storage_spiders/muahangtructuyencomvn.py | e3ab3bbbf0f13987eaeba9a31f3ed5a9bd875132 | [
"MIT"
]
| permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='product-info']/h1[@class='mainbox-title']",
'price' : "//div[@class='product-info']/div[@class='clear']/p/span/span/span | //div[@class='product-info']/div[@class='prices-container clear']/div[@class='float-left product-prices']/p/span/span/span",
'category' : "//div[@class='breadcrumbs']/a",
'description' : "//div[@class='product-main-info']/div[@id='tabs_content']",
'images' : "//div[@class='product-main-info']/form/div/div/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'muahangtructuyen.com.vn'
allowed_domains = ['muahangtructuyen.com.vn']
start_urls = ['http://muahangtructuyen.com.vn']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+\.html']), 'parse_item'),
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+/+$']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"[email protected]"
]
| |
ee57f682e295cbfd9747da50306e7deadad5f554 | b66e70a8bb3c53595acd01dceb23298694884b67 | /cloudy/cloudy/models.py | 92cd7f135177bdbc3b72c907c8741df29eb2c148 | []
| no_license | flupke/cloudy-release | d7735a38d79f816c52da3d983c714512a32919b1 | 6b160188a7067f125b107eb68dc8db4bbb4bfdf4 | refs/heads/master | 2016-09-06T05:23:40.856287 | 2013-02-23T18:17:16 | 2013-02-23T18:17:16 | 8,377,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | from django.db import models
class SshIdentity(models.Model):
name = models.CharField(max_length=256)
public = models.TextField()
private = models.TextField()
class HostsGroup(models.Model):
name = models.CharField(max_length=256)
ssh_user = models.CharField(max_length=32, blank=True)
ssh_identity = models.ForeignKey(SshIdentity, blank=True)
class Host(models.Model):
hostname = models.CharField(max_length=256)
alias = models.CharField(max_length=256, blank=True)
group = models.ForeignKey(HostsGroup)
ssh_user = models.CharField(max_length=32, blank=True)
ssh_identity = models.ForeignKey(SshIdentity, blank=True)
class Project(models.Model):
name = models.CharField(max_length=64)
hosts = models.ForeignKey(HostsGroup)
class Check(models.Model):
project = models.ForeignKey(Project)
name = models.CharField(max_length=64)
command = models.TextField()
| [
"[email protected]"
]
| |
b69d77bed712f5e5879450544915df9006afc0cc | 40b31d45c216a876843b9285be626180e7e989c9 | /novaagent/__init__.py | a46e74db9c3f0d040015cbc7e988088a38ea173b | [
"Apache-2.0"
]
| permissive | inflatador/nova-agent | 7221d492f35d4862e482e3803358a514e6a012d4 | c0449d500166f4adf3cd753dddb7c67087260bb1 | refs/heads/master | 2020-03-23T13:44:16.624566 | 2018-06-28T19:58:18 | 2018-06-28T19:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py |
__version__ = '2.1.14'
| [
"[email protected]"
]
| |
6adc753cf5c0b93e22a7d940f84597658076e3fa | cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde | /F/FindResultantArrayAfterRemovingAnagrams.py | b380d59a74a054967ffe8ad6c2e2113609d1576b | []
| no_license | bssrdf/pyleet | 8861bbac06dfe0f0f06f6ad1010d99f8def19b27 | 810575368ecffa97677bdb51744d1f716140bbb1 | refs/heads/master | 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,973 | py | '''
-Easy-
You are given a 0-indexed string array words, where words[i] consists of lowercase English letters.
In one operation, select any index i such that 0 < i < words.length and words[i - 1] and words[i] are anagrams, and delete words[i] from words. Keep performing this operation as long as you can select an index that satisfies the conditions.
Return words after performing all operations. It can be shown that selecting the indices for each operation in any arbitrary order will lead to the same result.
An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase using all the original letters exactly once. For example, "dacb" is an anagram of "abdc".
Example 1:
Input: words = ["abba","baba","bbaa","cd","cd"]
Output: ["abba","cd"]
Explanation:
One of the ways we can obtain the resultant array is by using the following operations:
- Since words[2] = "bbaa" and words[1] = "baba" are anagrams, we choose index 2 and delete words[2].
Now words = ["abba","baba","cd","cd"].
- Since words[1] = "baba" and words[0] = "abba" are anagrams, we choose index 1 and delete words[1].
Now words = ["abba","cd","cd"].
- Since words[2] = "cd" and words[1] = "cd" are anagrams, we choose index 2 and delete words[2].
Now words = ["abba","cd"].
We can no longer perform any operations, so ["abba","cd"] is the final answer.
Example 2:
Input: words = ["a","b","c","d","e"]
Output: ["a","b","c","d","e"]
Explanation:
No two adjacent strings in words are anagrams of each other, so no operations are performed.
Constraints:
1 <= words.length <= 100
1 <= words[i].length <= 10
words[i] consists of lowercase English letters.
'''
from typing import List
class Solution:
def removeAnagrams(self, words: List[str]) -> List[str]:
stack = []
for word in words:
if stack and sorted(stack[-1]) == sorted(word):
continue
stack.append(word)
return stack
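# Added quick check (illustrative), using the first example from the problem statement above:
if __name__ == "__main__":
    print(Solution().removeAnagrams(["abba", "baba", "bbaa", "cd", "cd"]))  # -> ['abba', 'cd']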
| [
"[email protected]"
]
| |
ce2343c09e39f921202647e30c1bfea5cae7d3a8 | 463c053bcf3f4a7337b634890720ea9467f14c87 | /rllib/examples/deterministic_training.py | 3a0a9c725acda75ce6b9cd7557c4fb04fd59a650 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
]
| permissive | pdames/ray | e8faddc4440976211a6bcead8f8b6e62c1dcda01 | 918d3601c6519d333f10910dc75eb549cbb82afa | refs/heads/master | 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 | Apache-2.0 | 2023-01-14T08:02:21 | 2020-03-06T20:59:04 | Python | UTF-8 | Python | false | false | 2,464 | py | """
Example of a fully deterministic, repeatable RLlib train run using
the "seed" config key.
"""
import argparse
import ray
from ray import tune
from ray.rllib.examples.env.env_using_remote_actor import (
CartPoleWithRemoteParamServer,
ParameterStorage,
)
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.metrics.learner_info import LEARNER_INFO
from ray.rllib.utils.test_utils import check
parser = argparse.ArgumentParser()
parser.add_argument("--run", type=str, default="PPO")
parser.add_argument("--framework", choices=["tf2", "tf", "tfe", "torch"], default="tf")
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--as-test", action="store_true")
parser.add_argument("--stop-iters", type=int, default=2)
parser.add_argument("--num-gpus-trainer", type=float, default=0)
parser.add_argument("--num-gpus-per-worker", type=float, default=0)
if __name__ == "__main__":
args = parser.parse_args()
param_storage = ParameterStorage.options(name="param-server").remote()
config = {
"env": CartPoleWithRemoteParamServer,
"env_config": {
"param_server": "param-server",
},
        # Number of GPUs for the trainer (set via --num-gpus-trainer).
"num_gpus": args.num_gpus_trainer,
"num_workers": 1, # parallelism
"num_gpus_per_worker": args.num_gpus_per_worker,
"num_envs_per_worker": 2,
"framework": args.framework,
# Make sure every environment gets a fixed seed.
"seed": args.seed,
# Simplify to run this example script faster.
"train_batch_size": 100,
"sgd_minibatch_size": 10,
"num_sgd_iter": 5,
"rollout_fragment_length": 50,
}
stop = {
"training_iteration": args.stop_iters,
}
results1 = tune.run(args.run, config=config, stop=stop, verbose=1)
results2 = tune.run(args.run, config=config, stop=stop, verbose=1)
if args.as_test:
results1 = list(results1.results.values())[0]
results2 = list(results2.results.values())[0]
# Test rollout behavior.
check(results1["hist_stats"], results2["hist_stats"])
# As well as training behavior (minibatch sequence during SGD
# iterations).
check(
results1["info"][LEARNER_INFO][DEFAULT_POLICY_ID]["learner_stats"],
results2["info"][LEARNER_INFO][DEFAULT_POLICY_ID]["learner_stats"],
)
ray.shutdown()
| [
"[email protected]"
]
| |
f9ac252177ad6e419233ca977c739c8b9a08c30c | 4bf5a16c17f888d5e0a2b043a6b752a6111824fd | /src/biotite/structure/util.py | 34495270dbcba6c8b3f79077462e59bc1fe60708 | [
"BSD-3-Clause"
]
| permissive | AAABioInfo/biotite | 1b0e8c6d6fbc870ff894fc1ae91c32fe6568aed3 | 693f347534bcf2c8894bbcabf68c225c43190ec6 | refs/heads/master | 2022-07-06T01:15:25.373371 | 2020-05-18T13:27:01 | 2020-05-18T13:27:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,226 | py | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
"""
Utility functions for in internal use in `Bio.Structure` package
"""
__name__ = "biotite.structure"
__author__ = "Patrick Kunzmann"
__all__ = ["vector_dot", "norm_vector", "distance", "matrix_rotate"]
import numpy as np
def vector_dot(v1,v2):
"""
Calculate vector dot product of two vectors.
Parameters
----------
v1,v2 : ndarray
The arrays to calculate the product from.
The vectors are represented by the last axis.
Returns
-------
product : float or ndarray
Scalar product over the last dimension of the arrays.
"""
return (v1*v2).sum(axis=-1)
def norm_vector(v):
"""
Normalise a vector.
Parameters
----------
v : ndarray
The array containg the vector(s).
The vectors are represented by the last axis.
"""
factor = np.linalg.norm(v, axis=-1)
if isinstance(factor, np.ndarray):
v /= factor[..., np.newaxis]
else:
v /= factor
def distance(v1,v2):
"""
Calculate the distance between two position vectors.
Parameters
----------
v1,v2 : ndarray
The arrays to calculate the product from.
The vectors are represented by the last axis.
Returns
-------
    distance : float or ndarray
Vector distance over the last dimension of the array.
"""
dif = v1 - v2
return np.sqrt((dif*dif).sum(axis=-1))
def matrix_rotate(v, matrix):
"""
Perform a rotation using a rotation matrix.
Parameters
----------
v : ndarray
The coordinates to rotate.
matrix : ndarray
The rotation matrix.
Returns
-------
rotated : ndarray
The rotated coordinates.
"""
# For proper rotation reshape into a maximum of 2 dimensions
orig_ndim = v.ndim
if orig_ndim > 2:
orig_shape = v.shape
v = v.reshape(-1, 3)
# Apply rotation
v = np.dot(matrix, v.T).T
# Reshape back into original shape
if orig_ndim > 2:
v = v.reshape(*orig_shape)
return v
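# --- Illustrative usage (added sketch, not part of the original module) ---
# Each helper operates on the last axis, so it accepts a single (3,) vector as
# well as an (n, 3) array of coordinates.
def _usage_example():
    # 90 degree rotation around the z-axis
    rot_z = np.array([[0.0, -1.0, 0.0],
                      [1.0,  0.0, 0.0],
                      [0.0,  0.0, 1.0]])
    point = np.array([1.0, 0.0, 0.0])
    rotated = matrix_rotate(point, rot_z)   # -> [0., 1., 0.]
    dist = distance(point, rotated)         # -> sqrt(2)
    norm_vector(rotated)                    # normalizes in place
    return vector_dot(point, rotated), dist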
| [
"[email protected]"
]
| |
742ed5a7da53469a0161d9225e9841a8d8cd06b4 | 90ec9a009d84dd7eebbd93de4f4b9de553326a39 | /app/customer/views.py | f18aa6381cc21c8fb4bfde7ab8a60775f87a3157 | []
| no_license | alexiuasse/NipponArDjango | 18a86bb108b9d72b36c8adf7c4344398cc4ca6b2 | ddc541a8d7e4428bde63c56f44354d6f82e0f40d | refs/heads/master | 2023-08-03T12:16:56.431870 | 2021-07-15T23:43:33 | 2021-07-15T23:43:33 | 278,093,323 | 0 | 0 | null | 2021-09-22T20:04:15 | 2020-07-08T13:13:22 | CSS | UTF-8 | Python | false | false | 7,674 | py | # Created by Alex Matos Iuasse.
# Copyright (c) 2020. All rights reserved.
# Last modified 24/08/2020 17:44.
from typing import Dict, Any
from django.contrib.admin.utils import NestedObjects
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views import View
from django.views.generic.edit import DeleteView, CreateView, UpdateView
from django_filters.views import FilterView
from django_tables2.paginators import LazyPaginator
from django_tables2.views import SingleTableMixin
from .conf import *
from .filters import *
from .forms import *
from .tables import *
from frontend.icons import ICON_PERSON, ICON_NEW_PERSON
class CustomerProfile(LoginRequiredMixin, View):
template = 'customer/profile.html'
def get(self, request, pk, tp):
obj = IndividualCustomer.objects.get(pk=pk) if tp == 0 else JuridicalCustomer.objects.get(pk=pk)
header = HEADER_CLASS_INDIVIDUAL_CUSTOMER if tp == 0 else HEADER_CLASS_JURIDICAL_CUSTOMER
context = {
'config': {
'header': header
},
'obj': obj,
}
return render(request, self.template, context)
class Customer(LoginRequiredMixin, View):
template = 'customer/view.html'
title = TITLE_VIEW_CUSTOMER
subtitle = SUBTITLE_VIEW_CUSTOMER
def get(self, request):
links = {
'Pessoas Físicas': {
'Pessoa Física': {
'name': "Ver Todas Pessoas Físicas",
'link': reverse_lazy('customer:individualcustomer:view'),
'contextual': 'success',
'icon': ICON_PERSON,
},
'Novo Cadastro': {
'name': "Novo Cadastro",
'link': reverse_lazy('customer:individualcustomer:create'),
'contextual': 'primary',
'icon': ICON_NEW_PERSON,
},
},
'Pessoas Jurídicas': {
'Pessoa Jurídica': {
'name': "Ver Todas Pessoas Jurídicas",
'link': reverse_lazy('customer:juridicalcustomer:view'),
'contextual': 'success',
'icon': ICON_PERSON,
},
'Novo Cadastro': {
'name': "Novo Cadastro",
'link': reverse_lazy('customer:juridicalcustomer:create'),
'contextual': 'primary',
'icon': ICON_NEW_PERSON,
},
},
}
context = {
'title': self.title,
'subtitle': self.subtitle,
'links': links
}
return render(request, self.template, context)
########################################################################################################################
class IndividualCustomerView(LoginRequiredMixin, PermissionRequiredMixin, SingleTableMixin, FilterView):
model = IndividualCustomer
table_class = IndividualCustomerTable
filterset_class = IndividualCustomerFilter
paginator_class = LazyPaginator
permission_required = 'customer.view_individualcustomer'
template_name = 'base/view.html'
title = TITLE_VIEW_INDIVIDUAL_CUSTOMER
subtitle = SUBTITLE_INDIVIDUAL_CUSTOMER
new = reverse_lazy('customer:individualcustomer:create')
back_url = reverse_lazy('customer:index')
header_class = HEADER_CLASS_INDIVIDUAL_CUSTOMER
class IndividualCustomerCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
model = IndividualCustomer
form_class = IndividualCustomerForm
template_name = 'customer/form.html'
permission_required = 'customer.create_individualcustomer'
title = TITLE_CREATE_INDIVIDUAL_CUSTOMER
subtitle = SUBTITLE_INDIVIDUAL_CUSTOMER
header_class = HEADER_CLASS_INDIVIDUAL_CUSTOMER
@staticmethod
def get_back_url():
return reverse_lazy('customer:individualcustomer:view')
class IndividualCustomerEdit(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
model = IndividualCustomer
form_class = IndividualCustomerForm
template_name = 'customer/form.html'
permission_required = 'customer.edit_individualcustomer'
title = TITLE_EDIT_INDIVIDUAL_CUSTOMER
subtitle = SUBTITLE_INDIVIDUAL_CUSTOMER
header_class = HEADER_CLASS_INDIVIDUAL_CUSTOMER
# delete all services
class IndividualCustomerDel(PermissionRequiredMixin, LoginRequiredMixin, DeleteView):
model = IndividualCustomer
template_name = "base/confirm_delete.html"
permission_required = 'customer.del_individualcustomer'
success_url = reverse_lazy('customer:individualcustomer:view')
title = TITLE_DEL_INDIVIDUAL_CUSTOMER
subtitle = SUBTITLE_INDIVIDUAL_CUSTOMER
header_class = HEADER_CLASS_INDIVIDUAL_CUSTOMER
def get_context_data(self, **kwargs):
context: Dict[str, Any] = super().get_context_data(**kwargs)
collector = NestedObjects(using='default') # or specific database
collector.collect([context['object']])
to_delete = collector.nested()
context['extra_object'] = to_delete
return context
########################################################################################################################
class JuridicalCustomerView(LoginRequiredMixin, PermissionRequiredMixin, SingleTableMixin, FilterView):
model = JuridicalCustomer
table_class = JuridicalCustomerTable
filterset_class = JuridicalCustomerFilter
paginator_class = LazyPaginator
permission_required = 'customer.view_juridicalcustomer'
template_name = 'base/view.html'
title = TITLE_VIEW_JURIDICAL_CUSTOMER
subtitle = SUBTITLE_JURIDICAL_CUSTOMER
new = reverse_lazy('customer:juridicalcustomer:create')
back_url = reverse_lazy('customer:index')
header_class = HEADER_CLASS_JURIDICAL_CUSTOMER
class JuridicalCustomerCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
model = JuridicalCustomer
form_class = JuridicalCustomerForm
template_name = 'base/form.html'
permission_required = 'customer.create_juridicalcustomer'
title = TITLE_CREATE_JURIDICAL_CUSTOMER
subtitle = SUBTITLE_JURIDICAL_CUSTOMER
header_class = HEADER_CLASS_JURIDICAL_CUSTOMER
@staticmethod
def get_back_url():
return reverse_lazy('customer:juridicalcustomer:view')
class JuridicalCustomerEdit(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
model = JuridicalCustomer
form_class = JuridicalCustomerForm
template_name = 'base/form.html'
permission_required = 'customer.edit_juridicalcustomer'
title = TITLE_EDIT_JURIDICAL_CUSTOMER
subtitle = SUBTITLE_JURIDICAL_CUSTOMER
header_class = HEADER_CLASS_JURIDICAL_CUSTOMER
# delete all services
class JuridicalCustomerDel(PermissionRequiredMixin, LoginRequiredMixin, DeleteView):
model = JuridicalCustomer
template_name = "base/confirm_delete.html"
permission_required = 'customer.del_juridicalcustomer'
success_url = reverse_lazy('customer:juridicalcustomer:view')
title = TITLE_DEL_JURIDICAL_CUSTOMER
subtitle = SUBTITLE_JURIDICAL_CUSTOMER
header_class = HEADER_CLASS_JURIDICAL_CUSTOMER
def get_context_data(self, **kwargs):
context: Dict[str, Any] = super().get_context_data(**kwargs)
collector = NestedObjects(using='default') # or specific database
collector.collect([context['object']])
to_delete = collector.nested()
context['extra_object'] = to_delete
return context
| [
"[email protected]"
]
| |
a7c26984aed690a4bffc47db05dcfca2eaafb289 | 26f6313772161851b3b28b32a4f8d255499b3974 | /Python/MaximumNestingDepthofTwoValidParenthesesStrings.py | 67d4f477e9fa483c28fe2874e85607452ffd9d93 | []
| no_license | here0009/LeetCode | 693e634a3096d929e5c842c5c5b989fa388e0fcd | f96a2273c6831a8035e1adacfa452f73c599ae16 | refs/heads/master | 2023-06-30T19:07:23.645941 | 2021-07-31T03:38:51 | 2021-07-31T03:38:51 | 266,287,834 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | """
A string is a valid parentheses string (denoted VPS) if and only if it consists of "(" and ")" characters only, and:
It is the empty string, or
It can be written as AB (A concatenated with B), where A and B are VPS's, or
It can be written as (A), where A is a VPS.
We can similarly define the nesting depth depth(S) of any VPS S as follows:
depth("") = 0
depth(A + B) = max(depth(A), depth(B)), where A and B are VPS's
depth("(" + A + ")") = 1 + depth(A), where A is a VPS.
For example, "", "()()", and "()(()())" are VPS's (with nesting depths 0, 1, and 2), and ")(" and "(()" are not VPS's.
Given a VPS seq, split it into two disjoint subsequences A and B, such that A and B are VPS's (and A.length + B.length = seq.length).
Now choose any such A and B such that max(depth(A), depth(B)) is the minimum possible value.
Return an answer array (of length seq.length) that encodes such a choice of A and B: answer[i] = 0 if seq[i] is part of A, else answer[i] = 1. Note that even though multiple answers may exist, you may return any of them.
Example 1:
Input: seq = "(()())"
Output: [0,1,1,1,1,0]
Example 2:
Input: seq = "()(())()"
Output: [0,0,0,1,1,0,1,1]
Constraints:
1 <= seq.size <= 10000
"""
class Solution:
def maxDepthAfterSplit(self, seq: str):
res = [0]*len(seq)
stack = []
num = -1
for i,s in enumerate(seq):
if s == '(':
num += 1
stack.append(num)
res[i] = num
elif s == ')':
num -= 1
res[i] = stack.pop()
# print(res)
return [i%2 for i in res]
S = Solution()
seq = "(()())"
print(S.maxDepthAfterSplit(seq))
seq = "()(())()"
print(S.maxDepthAfterSplit(seq)) | [
"[email protected]"
]
| |
f7dd8f55dc709f693b0211d8fcd73662147731f0 | 5574620c834f96d4baf50d6aa349242dae7c17af | /41.first-missing-positive.py | 76b5ddd14a2ff4b66c5f2817265ba08c132b15ab | []
| no_license | Ming-H/leetcode | 52dceba5f9a605afbdaa65e286a37205873e21bb | 057cee4b830603ac12976ed7d5cea8d06a9b46a0 | refs/heads/main | 2023-09-02T21:30:48.796395 | 2023-09-01T01:59:48 | 2023-09-01T01:59:48 | 489,290,172 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | #
# @lc app=leetcode id=41 lang=python3
#
# [41] First Missing Positive
#
class Solution:
def firstMissingPositive(self, nums):
"""
        We cannot use extra space, so the array itself is reused. As in
        counting sort, each value is used as an index: positive numbers are
        swapped into their matching slots so that A[0]=1, A[1]=2, A[2]=3, ...
        Afterwards, the first position that violates A[i] == i+1 means that
        i+1 is the first missing positive we are looking for.
"""
for i in range(len(nums)):
while 0 <= nums[i]-1 < len(nums) and nums[nums[i]-1] != nums[i]:
tmp = nums[i]-1
nums[i], nums[tmp] = nums[tmp], nums[i]
for i in range(len(nums)):
if nums[i] != i+1:
return i+1
return len(nums)+1
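# Quick illustrative check (added, not part of the original submission):
if __name__ == "__main__":
    print(Solution().firstMissingPositive([3, 4, -1, 1]))  # expected output: 2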
| [
"[email protected]"
]
| |
d28bf400e50f8c6d766ed1c1fb8dc15f1e4e723f | a3c662a5eda4e269a8c81c99e229879b946a76f6 | /.venv/lib/python3.7/site-packages/pylint/test/functional/trailing_comma_tuple.py | a832ccc28973265a5df8150f54034ca8fc5a239a | [
"MIT"
]
| permissive | ahmadreza-smdi/ms-shop | 0c29da82c58b243507575672bbc94fb6e8068aeb | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | refs/heads/master | 2023-04-27T19:51:34.858182 | 2019-11-24T20:57:59 | 2019-11-24T20:57:59 | 223,616,552 | 6 | 2 | MIT | 2023-04-21T20:51:21 | 2019-11-23T16:09:03 | Python | UTF-8 | Python | false | false | 732 | py | """Check trailing comma one element tuples."""
# pylint: disable=bad-whitespace, missing-docstring
AAA = 1, # [trailing-comma-tuple]
BBB = "aaaa", # [trailing-comma-tuple]
CCC="aaa", # [trailing-comma-tuple]
FFF=['f'], # [trailing-comma-tuple]
BBB = 1, 2
CCC = (1, 2, 3)
DDD = (
1, 2, 3,
)
EEE = (
"aaa",
)
def test(*args, **kwargs):
return args, kwargs
test(widget=1, label='test')
test(widget=1,
label='test')
test(widget=1, \
label='test')
def some_func(first, second):
if first:
return first, # [trailing-comma-tuple]
if second:
return (first, second,)
return first, second, # [trailing-comma-tuple]
def some_other_func():
yield 'hello', # [trailing-comma-tuple]
| [
"[email protected]"
]
| |
283575d0431210f70f269274660f9a4d6ba55839 | 667c324c7e8ac6a38cc91cd8ec4921a0dc9a0492 | /backend/accounts/models.py | 1340ee3158c537192b304432dd0f40f65bb50e5d | []
| no_license | litvaOo/elmy-clone | 86fdf80fff91642c088fa3cee50bd4ad32518afd | eb30b5fd2eb8cfc177f3c6fec53d61722c7fe9cd | refs/heads/master | 2021-05-08T02:33:48.277250 | 2017-10-23T16:11:21 | 2017-10-23T16:11:21 | 108,006,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
class ServiceProvider(models.Model):
rating = models.DecimalField(max_digits=2, decimal_places=1)
description = models.CharField(max_length=1000)
latitude = models.FloatField(default=0)
longitude = models.FloatField(default=0)
city = models.CharField(max_length=30, blank=True, null=True)
class Client(models.Model):
previous_buys = models.IntegerField(blank=True, null=True, default=0)
class CustomUser(AbstractUser):
phone = models.CharField(max_length=12, blank=True, null=True)
bank_account = models.CharField(max_length=16, blank=True, null=True)
customer = models.OneToOneField(Client, blank=True, null=True)
provider = models.OneToOneField(ServiceProvider, blank=True, null=True)
def __str__(self):
try:
return "Username: {0}, city: {1}".format(self.username, self.provider.city)
        except AttributeError:  # user has no linked provider
return self.username
# Create your models here.
| [
"[email protected]"
]
| |
8bac119f9df15d577d94fded7585b260efde9cc7 | a563a95e0d5b46158ca10d6edb3ca5d127cdc11f | /tccli/services/captcha/captcha_client.py | 8382673aac4f34d3d54b5528b41376e67b95efa9 | [
"Apache-2.0"
]
| permissive | SAIKARTHIGEYAN1512/tencentcloud-cli | e93221e0a7c70f392f79cda743a86d4ebbc9a222 | d129f1b3a943504af93d3d31bd0ac62f9d56e056 | refs/heads/master | 2020-08-29T09:20:23.790112 | 2019-10-25T09:30:39 | 2019-10-25T09:30:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,063 | py | # -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli.nice_command import NiceCommand
import tccli.error_msg as ErrorMsg
import tccli.help_template as HelpTemplate
from tccli import __version__
from tccli.utils import Utils
from tccli.configure import Configure
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.captcha.v20190722 import captcha_client as captcha_client_v20190722
from tencentcloud.captcha.v20190722 import models as models_v20190722
from tccli.services.captcha import v20190722
from tccli.services.captcha.v20190722 import help as v20190722_help
def doDescribeCaptchaResult(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeCaptchaResult", g_param[OptionsDefine.Version])
return
param = {
"CaptchaType": Utils.try_to_json(argv, "--CaptchaType"),
"Ticket": argv.get("--Ticket"),
"UserIp": argv.get("--UserIp"),
"Randstr": argv.get("--Randstr"),
"CaptchaAppId": Utils.try_to_json(argv, "--CaptchaAppId"),
"AppSecretKey": argv.get("--AppSecretKey"),
"BusinessId": Utils.try_to_json(argv, "--BusinessId"),
"SceneId": Utils.try_to_json(argv, "--SceneId"),
"MacAddress": argv.get("--MacAddress"),
"Imei": argv.get("--Imei"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CaptchaClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeCaptchaResultRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeCaptchaResult(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
CLIENT_MAP = {
"v20190722": captcha_client_v20190722,
}
MODELS_MAP = {
"v20190722": models_v20190722,
}
ACTION_MAP = {
"DescribeCaptchaResult": doDescribeCaptchaResult,
}
AVAILABLE_VERSION_LIST = [
v20190722.version,
]
AVAILABLE_VERSIONS = {
'v' + v20190722.version.replace('-', ''): {"help": v20190722_help.INFO,"desc": v20190722_help.DESC},
}
def captcha_action(argv, arglist):
if "help" in argv:
versions = sorted(AVAILABLE_VERSIONS.keys())
opt_v = "--" + OptionsDefine.Version
version = versions[-1]
if opt_v in argv:
version = 'v' + argv[opt_v].replace('-', '')
if version not in versions:
print("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return
action_str = ""
docs = AVAILABLE_VERSIONS[version]["help"]
desc = AVAILABLE_VERSIONS[version]["desc"]
for action, info in docs.items():
action_str += " %s\n" % action
action_str += Utils.split_str(" ", info["desc"], 120)
helpstr = HelpTemplate.SERVICE % {"name": "captcha", "desc": desc, "actions": action_str}
print(helpstr)
else:
print(ErrorMsg.FEW_ARG)
def version_merge():
help_merge = {}
for v in AVAILABLE_VERSIONS:
for action in AVAILABLE_VERSIONS[v]["help"]:
if action not in help_merge:
help_merge[action] = {}
help_merge[action]["cb"] = ACTION_MAP[action]
help_merge[action]["params"] = []
for param in AVAILABLE_VERSIONS[v]["help"][action]["params"]:
if param["name"] not in help_merge[action]["params"]:
help_merge[action]["params"].append(param["name"])
return help_merge
def register_arg(command):
cmd = NiceCommand("captcha", captcha_action)
command.reg_cmd(cmd)
cmd.reg_opt("help", "bool")
cmd.reg_opt(OptionsDefine.Version, "string")
help_merge = version_merge()
for actionName, action in help_merge.items():
c = NiceCommand(actionName, action["cb"])
cmd.reg_cmd(c)
c.reg_opt("help", "bool")
for param in action["params"]:
c.reg_opt("--" + param, "string")
for opt in OptionsDefine.ACTION_GLOBAL_OPT:
stropt = "--" + opt
c.reg_opt(stropt, "string")
def parse_global_arg(argv):
params = {}
for opt in OptionsDefine.ACTION_GLOBAL_OPT:
stropt = "--" + opt
if stropt in argv:
params[opt] = argv[stropt]
else:
params[opt] = None
if params[OptionsDefine.Version]:
params[OptionsDefine.Version] = "v" + params[OptionsDefine.Version].replace('-', '')
config_handle = Configure()
profile = config_handle.profile
if ("--" + OptionsDefine.Profile) in argv:
profile = argv[("--" + OptionsDefine.Profile)]
is_conexist, conf_path = config_handle._profile_existed(profile + "." + config_handle.configure)
is_creexist, cred_path = config_handle._profile_existed(profile + "." + config_handle.credential)
config = {}
cred = {}
if is_conexist:
config = config_handle._load_json_msg(conf_path)
if is_creexist:
cred = config_handle._load_json_msg(cred_path)
for param in params.keys():
if param == OptionsDefine.Version:
continue
if params[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId]:
if param in cred:
params[param] = cred[param]
else:
raise Exception("%s is invalid" % param)
else:
if param in config:
params[param] = config[param]
elif param == OptionsDefine.Region:
raise Exception("%s is invalid" % OptionsDefine.Region)
try:
if params[OptionsDefine.Version] is None:
version = config["captcha"][OptionsDefine.Version]
params[OptionsDefine.Version] = "v" + version.replace('-', '')
if params[OptionsDefine.Endpoint] is None:
params[OptionsDefine.Endpoint] = config["captcha"][OptionsDefine.Endpoint]
except Exception as err:
raise Exception("config file:%s error, %s" % (conf_path, str(err)))
versions = sorted(AVAILABLE_VERSIONS.keys())
if params[OptionsDefine.Version] not in versions:
raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return params
def show_help(action, version):
docs = AVAILABLE_VERSIONS[version]["help"][action]
desc = AVAILABLE_VERSIONS[version]["desc"]
docstr = ""
for param in docs["params"]:
docstr += " %s\n" % ("--" + param["name"])
docstr += Utils.split_str(" ", param["desc"], 120)
helpmsg = HelpTemplate.ACTION % {"name": action, "service": "captcha", "desc": desc, "params": docstr}
print(helpmsg)
def get_actions_info():
config = Configure()
new_version = max(AVAILABLE_VERSIONS.keys())
version = new_version
try:
profile = config._load_json_msg(os.path.join(config.cli_path, "default.configure"))
version = profile["captcha"]["version"]
version = "v" + version.replace('-', '')
except Exception:
pass
if version not in AVAILABLE_VERSIONS.keys():
version = new_version
return AVAILABLE_VERSIONS[version]["help"]
| [
"[email protected]"
]
| |
973985b9f213204d6193613b33715c89be7142b6 | 555b9f764d9bca5232360979460bc35c2f5ad424 | /google/ads/google_ads/v1/proto/services/operating_system_version_constant_service_pb2.py | 1ee3878ce83414b2d29fbf7d33f34fba67bb97ed | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
]
| permissive | juanmacugat/google-ads-python | b50256163782bc0223bcd8b29f789d74f4cfad05 | 0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a | refs/heads/master | 2021-02-18T17:00:22.067673 | 2020-03-05T16:13:57 | 2020-03-05T16:13:57 | 245,215,877 | 1 | 0 | Apache-2.0 | 2020-03-05T16:39:34 | 2020-03-05T16:39:33 | null | UTF-8 | Python | false | true | 5,671 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/services/operating_system_version_constant_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v1.proto.resources import operating_system_version_constant_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_operating__system__version__constant__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/services/operating_system_version_constant_service.proto',
package='google.ads.googleads.v1.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v1.servicesB*OperatingSystemVersionConstantServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V1.Services\312\002 Google\\Ads\\GoogleAds\\V1\\Services\352\002$Google::Ads::GoogleAds::V1::Services'),
serialized_pb=_b('\nVgoogle/ads/googleads_v1/proto/services/operating_system_version_constant_service.proto\x12 google.ads.googleads.v1.services\x1aOgoogle/ads/googleads_v1/proto/resources/operating_system_version_constant.proto\x1a\x1cgoogle/api/annotations.proto\"A\n(GetOperatingSystemVersionConstantRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\x9b\x02\n%OperatingSystemVersionConstantService\x12\xf1\x01\n!GetOperatingSystemVersionConstant\x12J.google.ads.googleads.v1.services.GetOperatingSystemVersionConstantRequest\x1a\x41.google.ads.googleads.v1.resources.OperatingSystemVersionConstant\"=\x82\xd3\xe4\x93\x02\x37\x12\x35/v1/{resource_name=operatingSystemVersionConstants/*}B\x91\x02\n$com.google.ads.googleads.v1.servicesB*OperatingSystemVersionConstantServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V1.Services\xca\x02 Google\\Ads\\GoogleAds\\V1\\Services\xea\x02$Google::Ads::GoogleAds::V1::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_operating__system__version__constant__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_GETOPERATINGSYSTEMVERSIONCONSTANTREQUEST = _descriptor.Descriptor(
name='GetOperatingSystemVersionConstantRequest',
full_name='google.ads.googleads.v1.services.GetOperatingSystemVersionConstantRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v1.services.GetOperatingSystemVersionConstantRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=235,
serialized_end=300,
)
DESCRIPTOR.message_types_by_name['GetOperatingSystemVersionConstantRequest'] = _GETOPERATINGSYSTEMVERSIONCONSTANTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetOperatingSystemVersionConstantRequest = _reflection.GeneratedProtocolMessageType('GetOperatingSystemVersionConstantRequest', (_message.Message,), dict(
DESCRIPTOR = _GETOPERATINGSYSTEMVERSIONCONSTANTREQUEST,
__module__ = 'google.ads.googleads_v1.proto.services.operating_system_version_constant_service_pb2'
,
__doc__ = """Request message for
[OperatingSystemVersionConstantService.GetOperatingSystemVersionConstant][google.ads.googleads.v1.services.OperatingSystemVersionConstantService.GetOperatingSystemVersionConstant].
Attributes:
resource_name:
Resource name of the OS version to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.GetOperatingSystemVersionConstantRequest)
))
_sym_db.RegisterMessage(GetOperatingSystemVersionConstantRequest)
DESCRIPTOR._options = None
_OPERATINGSYSTEMVERSIONCONSTANTSERVICE = _descriptor.ServiceDescriptor(
name='OperatingSystemVersionConstantService',
full_name='google.ads.googleads.v1.services.OperatingSystemVersionConstantService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=303,
serialized_end=586,
methods=[
_descriptor.MethodDescriptor(
name='GetOperatingSystemVersionConstant',
full_name='google.ads.googleads.v1.services.OperatingSystemVersionConstantService.GetOperatingSystemVersionConstant',
index=0,
containing_service=None,
input_type=_GETOPERATINGSYSTEMVERSIONCONSTANTREQUEST,
output_type=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_operating__system__version__constant__pb2._OPERATINGSYSTEMVERSIONCONSTANT,
serialized_options=_b('\202\323\344\223\0027\0225/v1/{resource_name=operatingSystemVersionConstants/*}'),
),
])
_sym_db.RegisterServiceDescriptor(_OPERATINGSYSTEMVERSIONCONSTANTSERVICE)
DESCRIPTOR.services_by_name['OperatingSystemVersionConstantService'] = _OPERATINGSYSTEMVERSIONCONSTANTSERVICE
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
]
| |
b30622071ac8d8b8f022702c199e4e3e3d14d14c | 9ed05e94ad0779adda724a15591c459f47cd083a | /scripts/visualize_genomic_elements.py | f07cf8f8c747c5756efd2bcd74e54120f5620300 | [
"BSD-3-Clause"
]
| permissive | greenelab/tad_pathways | b9dad990a21dc30bb01fe9e6e8ed294ac9af18c7 | c871d99c6d73cc68f58ef89fffbc9b6bbefe416c | refs/heads/master | 2023-08-01T00:11:16.873202 | 2017-04-21T17:37:06 | 2017-04-21T17:37:06 | 65,410,058 | 1 | 2 | null | 2017-04-21T17:37:07 | 2016-08-10T19:21:20 | Python | UTF-8 | Python | false | false | 13,030 | py | """
2016 Gregory Way
scripts/visualize_genomic_elements.py
Description:
Summarizes the location of genomic elements across TADs
Usage:
Is called by 'scripts/visualize.sh' which is run inside of
'scripts/run_pipeline.sh'. This particular script will output the location
of genomic elements in a given input TAD
python scripts/visualize_genomic_elements.py --TAD-Boundary 'hESC'
Output:
Several .pdf plots in "figures/genome/" and chisquare analyses of the
"rightness" of SNPs in TADs and protein coding genes near boundaries.
"""
import os
import argparse
import csv
import numpy as np
import pandas as pd
from scipy.stats import chisquare
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
from tad_util.util import assign_bin
plt.rcParams['figure.max_open_warning'] = 0  # silence "too many open figures" warning
sns.set_style("whitegrid")
sns.set_style("ticks")
sns.set_context("paper", rc={"font.size": 20, "axes.titlesize": 20,
"axes.labelsize": 20, "xtick.labelsize": 12,
"ytick.labelsize": 12})
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--TAD-Boundary', help='boundary cell type. The '
                    'options can be "hESC", "IMR90", "mESC", or "cortex"')
args = parser.parse_args()
# Load Constants
num_bins = 50
tad_cell = args.TAD_Boundary
xlab = [''] * num_bins
for x in range(0, 50, 10):
xlab[x] = x
if tad_cell in ['hESC', 'IMR90']:
genome = 'hg19'
elif tad_cell in ['mESC', 'cortex']:
genome = 'mm9'
else:
raise ValueError('Please input: "hESC", "IMR90", "mESC", or "cortex"')
# Input files
base_file = '{}_{}'.format(genome, tad_cell)
snp_index = os.path.join('index', 'SNP_index_{}.tsv.bz2'.format(base_file))
gene_index = os.path.join('index', 'GENE_index_{}.tsv.bz2'.format(base_file))
repeat_index = os.path.join('index', 'REPEATS_index_{}.tsv.bz2'
.format(base_file))
# Output files
fig_base = os.path.join('figures', genome)
if not os.path.exists(fig_base):
os.makedirs(fig_base)
snp_count_file = os.path.join(fig_base, 'snp_count_{}.pdf'.format(base_file))
snp_dist_file = os.path.join(fig_base, 'snp_tad_distribution_{}.pdf'
.format(base_file))
snp_chrom_file = os.path.join(fig_base, 'snp_tad_distrib_chromosomes_{}.pdf'
.format(base_file))
snp_chi_square = os.path.join('results',
'tad_snp_rightness_chi_{}.csv').format(base_file)
gene_count_file = os.path.join(fig_base, 'gene_count_{}.pdf'
.format(base_file))
gene_chrom_file = os.path.join(fig_base, 'gene_tad_distrib_chromosomes_{}.pdf'
.format(base_file))
gene_type_file = os.path.join(fig_base, 'gene_types_{}.pdf'.format(base_file))
gene_chi_square = os.path.join('results',
'tad_gene_bound_chi_{}.csv').format(base_file)
repeat_count_file = os.path.join(fig_base, 'repeat_count_{}.pdf'
.format(base_file))
rep_type_file = os.path.join(fig_base, 'repeat_type_{}_.pdf'.format(base_file))
repeat_dist = os.path.join(fig_base, 'repeat_type_all_distrib_{}.pdf'
.format(base_file))
# Load Data
gene_types_df = pd.read_table(os.path.join('tables',
'gene_classification.tsv'))
snp_df = pd.read_table(snp_index, index_col=0)
gene_df = pd.read_table(gene_index, index_col=0)
repeat_df = pd.read_table(repeat_index, index_col=0)
#########################
# PART 1 - SNPs
#########################
# Process SNP dataframe
snp_df = snp_df[snp_df['TAD_id'] != 'Boundary']
bin_s = snp_df.apply(lambda x: assign_bin(x, bins=num_bins, ID='SNP'), axis=1)
snp_df = snp_df.assign(tad_bin=bin_s)
# Jointplot of number of SNPs per TAD by TAD length
plot_ready = snp_df.assign(tad_length=np.log10(snp_df.TAD_end
.sub(snp_df.TAD_start)))
plot_ready = pd.DataFrame(plot_ready.groupby(['TAD_id', 'tad_length'])
.tad_bin.count()).reset_index()
plot_ready = plot_ready.assign(snp_count_alt=plot_ready.tad_bin.div(1000))
ax = sns.jointplot('tad_length', 'snp_count_alt', data=plot_ready,
kind='scatter', stat_func=None,
color=sns.xkcd_rgb['medium green'], joint_kws={'s': 3})
ax.set_axis_labels(xlabel='TAD Length (log10 kb)',
ylabel='Number of SNPs (x1000)')
plt.tight_layout()
plt.savefig(snp_count_file)
plt.close()
# Distribution of SNPs across TADs
summary_snp = snp_df['tad_bin'].value_counts(sort=False)
p = sns.pointplot(x=summary_snp.index, y=summary_snp / 1000,
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='Number of SNPs (x1000)', xlabel='TAD Bins')
p.set_title('Distribution of SNPs across TADs')
plt.tight_layout()
plt.savefig(snp_dist_file)
plt.close()
# Chromosome-specific distribution
snp_chrom = snp_df.groupby('chromosome').tad_bin.value_counts(sort=False).\
unstack(level=0)
with PdfPages(snp_chrom_file) as pdf:
for chrom, chrom_df in snp_chrom.iteritems():
p = sns.pointplot(x=chrom_df.index, y=chrom_df,
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='Number of SNPs', xlabel='TAD Bins')
p.set_title('SNP Distribution in Chromosome {}'.format(chrom))
plt.tight_layout()
pdf.savefig()
plt.close()
# SNPs appear to be more concentrated on the right side of TADs
snp_side = [snp_df[snp_df['tad_bin'] < 25].shape[0],
snp_df[snp_df['tad_bin'] >= 25].shape[0]]
tad_snp_sig = chisquare(snp_side)
with open(snp_chi_square, 'w') as chisq_fh:
snpwriter = csv.writer(chisq_fh, delimiter=',')
snpwriter.writerow(['SNPs in the left vs. right of {} TAD'
.format(tad_cell)])
snpwriter.writerow(['left', 'right'])
snpwriter.writerow(snp_side)
snpwriter.writerow(tad_snp_sig)
#########################
# PART 2 - Genes
#########################
# Process genes
gene_df = gene_df[gene_df['TAD_id'] != 'Boundary']
bin_assign_gene = gene_df.apply(lambda x: assign_bin(x, bins=num_bins,
ID='gene'), axis=1)
gene_df = gene_df.assign(tad_bin=bin_assign_gene)
gene_df = gene_df[gene_df['tad_bin'] != -1]
# Jointplot of number of Genes per TAD
plot_ready_gene = gene_df.assign(tad_length=np.log10(gene_df.TAD_end
.sub(gene_df.TAD_start)))
plot_ready_gene = pd.DataFrame(plot_ready_gene.groupby(['TAD_id',
'tad_length'])
.tad_bin.count()).reset_index()
plot_ready_gene = plot_ready_gene.assign(gene_count_alt=plot_ready_gene
.tad_bin)
ax = sns.jointplot('tad_length', 'gene_count_alt', data=plot_ready_gene,
kind='scatter', stat_func=None,
color=sns.xkcd_rgb['medium green'], joint_kws={'s': 3})
ax.set_axis_labels(xlabel='TAD Length (log10 kb)',
ylabel='Number of Genes')
plt.savefig(gene_count_file)
plt.close()
# Chromosome specific distribution of genes across TADs
gene_chrom = gene_df.groupby('chromosome').tad_bin.value_counts(sort=False).\
unstack(level=0)
with PdfPages(gene_chrom_file) as pdf:
for chrom, chrom_df in gene_chrom.iteritems():
ax = sns.pointplot(x=chrom_df.index, y=chrom_df,
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
ax.set(xticklabels=xlab)
ax.set(ylabel='Number of Genes', xlabel='TAD Bins')
ax.set_title('Gene Distribution in Chromosome {}'.format(chrom))
plt.tight_layout()
pdf.savefig()
plt.close()
# Gene-type specific distribution across TADs
gene_types_df = gene_types_df[gene_types_df[genome] == 1]
summary_gene_classes = []
with PdfPages(gene_type_file) as pdf:
for idx, gene in gene_types_df.iterrows():
gene_class = gene['gene_class']
gene_type = gene['gene_type']
if gene_class in ['tr_gene', 'ig_gene', 'tr_pseud', 'ig_pseud']:
gene_type = gene_types_df[gene_types_df['gene_class'] ==
gene_class]['gene_type']
gene_sub_df = gene_df[gene_df['gene_type'].isin(gene_type)]
plot_title = gene_class
if gene_class in summary_gene_classes:
continue
else:
summary_gene_classes.append(gene_class)
elif gene_class == 'std' and gene_type != 'all':
gene_sub_df = gene_df[gene_df['gene_type'] == gene_type]
plot_title = gene_type
elif gene_type == 'all':
gene_sub_df = gene_df
plot_title = 'Distribution of Genes across TADs'
sum_gene = gene_sub_df['tad_bin'].value_counts(sort=False).sort_index()
ax = sns.pointplot(x=sum_gene.index, y=sum_gene,
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
ax.set(xticklabels=xlab)
ax.set(ylabel='Number of Genes', xlabel='TAD Bins')
ax.set_title(plot_title)
plt.tight_layout()
pdf.savefig()
plt.close()
# Chisquare of genes on TAD boundaries
protein_coding = gene_df[gene_df['gene_type'] == 'protein_coding']
bin_list = list(range(num_bins))[0:2] + list(range(num_bins))[-2:]
boundary_df = protein_coding[protein_coding['tad_bin'].isin(bin_list)]
num_genes_b = boundary_df.shape[0]
num_genes_c = protein_coding.shape[0] - num_genes_b
chi_test = [num_genes_b, num_genes_c]
exp = protein_coding.shape[0] / num_bins
bound_chi = chisquare(chi_test, f_exp=[exp * len(bin_list),
exp * (num_bins - len(bin_list))])
with open(gene_chi_square, 'w') as chisq_fh:
genewriter = csv.writer(chisq_fh, delimiter=',')
genewriter.writerow(['Genes at boundaries vs. center of {} TAD'
.format(tad_cell)])
genewriter.writerow(['bound', 'center'])
genewriter.writerow(chi_test)
genewriter.writerow(bound_chi)
#########################
# PART 3 - Repeats
#########################
# Process Repeats
repeat_df = repeat_df.fillna('Boundary')
repeat_df = repeat_df[repeat_df['TAD_id'] != 'Boundary']
bin_assign_repeat = repeat_df.apply(lambda x: assign_bin(x, bins=num_bins,
ID='repeat'), axis=1)
repeat_df = repeat_df.assign(tad_bin=bin_assign_repeat)
repeat_df = repeat_df[repeat_df['tad_bin'] != -1]
# Jointplot of number of repeats per TAD
repeat_df.TAD_end = repeat_df.TAD_end.astype(int)
repeat_df.TAD_start = repeat_df.TAD_start.astype(int)
plot_ready_repeat = repeat_df.assign(tad_length=np.log10(repeat_df.TAD_end
.sub(repeat_df.TAD_start)))
plot_ready_repeat = pd.DataFrame(plot_ready_repeat.groupby(['TAD_id',
'tad_length'])
.tad_bin.count()).reset_index()
plot_ready_repeat = plot_ready_repeat.assign(rep_count_alt=plot_ready_repeat
.tad_bin.div(100))
ax = sns.jointplot('tad_length', 'rep_count_alt', data=plot_ready_repeat,
kind='scatter', stat_func=None,
color=sns.xkcd_rgb['medium green'], joint_kws={'s': 3})
ax.set_axis_labels(xlabel='TAD Length (log10 kb)',
ylabel='Number of Repeats (x100)')
plt.savefig(repeat_count_file)
plt.close()
# Distribution of different classes of repeats across TADs
with PdfPages(rep_type_file) as pdf:
for repeat_type in repeat_df['repeat'].unique():
if '?' not in repeat_type:
repeat_fh = repeat_type.replace('/', '_')
rep_sub = repeat_df[repeat_df['repeat'] == repeat_type]
sum_rep = rep_sub['tad_bin'].value_counts(sort=False).sort_index()
p = sns.pointplot(x=sum_rep.index, y=sum_rep,
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='Number of Repeats', xlabel='TAD Bins')
p.set_title(repeat_type + ' Distribution')
plt.tight_layout()
pdf.savefig()
plt.close()
# Distribution of all repeats
sum_repeat = repeat_df['tad_bin'].value_counts(sort=False).sort_index()
p = sns.pointplot(x=sum_repeat.index, y=sum_repeat.div(100),
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='Number of Repeats (x100)', xlabel='TAD Bins')
p.set_title('All Repeats Distribution')
plt.tight_layout()
plt.savefig(repeat_dist)
plt.close()
| [
"[email protected]"
]
| |
69e1dec6b346397c1857340caf4299600c26a600 | 2fe8194db578820629740e7022326355ef76632a | /instaladores/migrations/0004_merge_20201128_1647.py | 52b65ade950c986c1f9bf531762ba99d0d9e0cfe | []
| no_license | Aleleonel/newloma | 01213a14036aa7437b5951b8bb7ef202de6b86c2 | 7910c5b3170b953134240536b6e5376c96382266 | refs/heads/master | 2023-01-18T19:15:08.890658 | 2020-11-28T20:22:48 | 2020-11-28T20:22:48 | 312,459,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | # Generated by Django 3.1.3 on 2020-11-28 19:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('instaladores', '0003_instaladores_email'),
('instaladores', '0002_auto_20201122_1232'),
]
operations = [
]
| [
"[email protected]"
]
| |
d90fb9bc6062203554935aaa9d2091c9aa8edcdb | 72579db4299be6d512a766ce38ae50e3c7753368 | /.history/Pythonlearning/day9_20200802091221.py | c5ab6ce577d7bd4429235686a4956391bbf742ca | []
| no_license | moteily/Python_Learning | f0d1abf360ad417112051ba52f32a141452adb2d | c294aa1e373254739fb372918507cd7dbe12c999 | refs/heads/master | 2022-11-26T11:09:48.145308 | 2020-08-04T08:47:15 | 2020-08-04T08:47:15 | 284,379,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | #接上一天的第九章
# Static methods and class methods:
# How to define and create them: staticmethod vs. classmethod
class Myclass:
def smeth():
        print('This is a static method')
smeth = staticmethod(smeth)
    def cmeth(cls):
        print('This is a class method of', cls)  # classmethod counterpart to smeth
    cmeth = classmethod(cmeth)
| [
"[email protected]"
]
| |
5fba23b3bfb05e91ac578ebeb773c34c16a2d760 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoEgamma/EgammaIsolationAlgos/python/eleTrackExtractorBlocks_cff.py | a0465cbb16938dc958035bcbba12f0a0b49dbf37 | [
"Apache-2.0"
]
| permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 643 | py | import FWCore.ParameterSet.Config as cms
EleIsoTrackExtractorBlock = cms.PSet(
ComponentName = cms.string('EgammaTrackExtractor'),
inputTrackCollection = cms.InputTag("generalTracks"),
DepositLabel = cms.untracked.string(''),
Diff_r = cms.double(9999.0),
Diff_z = cms.double(0.2),
DR_Max = cms.double(1.0),
DR_Veto = cms.double(0.0),
BeamlineOption = cms.string('BeamSpotFromEvent'),
BeamSpotLabel = cms.InputTag("offlineBeamSpot"),
NHits_Min = cms.uint32(0),
Chi2Ndof_Max = cms.double(1e+64),
Chi2Prob_Min = cms.double(-1.0),
Pt_Min = cms.double(-1.0),
dzOption = cms.string("vz")
)
| [
"[email protected]"
]
| |
0f679e9becb942faabe154fdacf30c7f881b2d4f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/671.py | 42a2e415e2dafaa7888c38febad69fbcb7a3fdab | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py |
FILE_NAME = 'C-large';
INPUT_FILE = FILE_NAME+'.in';
OUTPUT_FILE = FILE_NAME+'.out';
def algorithm(N, K):
segments = [N]
while K > 0:
segments.sort(reverse=True)
biggest_segment = segments[0]
del segments[0]
if(biggest_segment % 2 == 0):
left = biggest_segment / 2 - 1
right = biggest_segment / 2
else:
left = right = biggest_segment / 2
segments.append(right)
segments.append(left)
K -= 1
result = segments[-2:]
return str(result[0]) + " " + str(result[1])
def solve(data):
N = int(data[0])
K = int(data[1])
log2 = K.bit_length() - 1
pow_log2 = 2**log2
Kscaled = K/pow_log2
Nscaled = N/pow_log2
if N%pow_log2 < K%pow_log2:
Nscaled -= 1
return str(algorithm(Nscaled, Kscaled));
def run():
with open(INPUT_FILE) as in_file:
lines = in_file.readlines()
n_tests = int(lines[0]);
out_file = open(OUTPUT_FILE,'w')
count = 1
for i in range(1,len(lines)):
result = solve(lines[i].split())
string_result = "Case #%d: %s\n" % (count,result)
out_file.write(string_result);
print string_result
count += 1
# def debug(N, K):
# print "-------"
# L = K.bit_length() - 1
# print "division power 2: ", N/2**L, K/2**L
# print "reminder: ", N%(2**L), K%(2**L)
# print "correct: " , algorithm(N, K)
# print N, K, 2**L
# print "fast: ", algorithm(N/2**L , K/2**L)
# print "-------"
# def correct(N, K):
# global TEST_COUNT
# L = K.bit_length() - 1
# L2 = 2**L
# Ntest = N/L2
# if N%L2 < K%L2:
# Ntest -= 1
# Ktest = K/L2
# correct = algorithm(N, K)
# test = algorithm(Ntest, Ktest)
# if correct == test:
# #print N, K, L2, "!", N/L2, Ktest, "!", N%L2, K%L2, correct == test, "!", N-K
# print N%L2 < K%L2
# #print correct
# #print algorithm(Ntest + 1 , Ktest)
# #print algorithm(Ntest - 1 , Ktest)
# #print "-----"
run()
| [
"[email protected]"
]
| |
be370b1c9635cd0f42269dd7fcec37bb899a703c | f0ef364ed2d20390ff76bc7c5b9506cb41ba2e71 | /widgets4py/websocket/examples/w2ui_toolbar_example.py | 9f430804dd5066d43512e58a6ed47619c6c1eb7f | []
| no_license | singajeet/widgets4py | 07c983e06d6101b6421bf96224fa1bcc3793f47a | e3ca6a459dee896af755278257a914efe04b1d11 | refs/heads/master | 2020-06-09T19:08:20.295781 | 2020-02-14T15:55:23 | 2020-02-14T15:55:23 | 193,489,543 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,188 | py | import os
import webview
from flask import Flask # , url_for
from flask_socketio import SocketIO
from widgets4py.base import Page
from widgets4py.websocket.w2ui.ui import Toolbar, ToolbarButton, ToolbarCheck
from widgets4py.websocket.w2ui.ui import ToolbarHTML, ToolbarMenu, ToolbarMenuCheck
from widgets4py.websocket.w2ui.ui import ToolbarMenuRadio, ToolbarRadio, ToolbarSeparator
from widgets4py.websocket.w2ui.ui import ToolbarDropDown, ToolbarSpacer
from multiprocessing import Process
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, async_mode=None)
class W2UIPage:
pg = None
toolbar = None
tool_btn = None
tool_chk = None
tool_html = None
tool_menu = None
tool_menu_chk = None
tool_menu_rd = None
tool_rd = None
tool_sep = None
tool_dd = None
tool_spacer = None
def show_layout(self):
self.pg = Page('myPage', 'My Page')
self.toolbar = Toolbar('toolbar', socketio, onclick_callback=self._toolbar_clicked)
self.tool_btn = ToolbarButton('toolbtn', 'Button')
self.tool_chk = ToolbarCheck('tool_chk', 'Check')
self.tool_dd = ToolbarDropDown('tool_dd', 'My DropDown content', 'DropDown')
self.tool_html = ToolbarHTML('tool_html', '<input type=text />', 'Html')
self.tool_menu = ToolbarMenu('tool_menu', 'Actions')
self.tool_menu.add_item('Add')
self.tool_menu.add_item('Insert')
self.tool_menu.add_item('Remove')
self.tool_menu.add_item('Show')
self.tool_menu.add_item('Hide')
self.tool_menu.add_item('Enable')
self.tool_menu.add_item('Disable')
self.tool_menu_chk = ToolbarMenuCheck('tool_menu_chk', 'MenuCheck')
self.tool_menu_chk.add_item('item1', 'Item1')
self.tool_menu_chk.add_item('item2', 'Item2')
self.tool_menu_rd = ToolbarMenuRadio('tool_menu_rd', 'MenuRadio')
self.tool_menu_rd.add_item('item1', 'Item1')
self.tool_menu_rd.add_item('item2', 'Item2')
self.tool_rd = ToolbarRadio('tool_rd', 'Radio')
self.tool_sep = ToolbarSeparator('tool_sep', 'Sep')
self.tool_spacer = ToolbarSpacer('tool_spacer', 'Spac')
self.toolbar.add(self.tool_btn)
self.toolbar.add(self.tool_chk)
self.toolbar.add(self.tool_dd)
self.toolbar.add(self.tool_html)
self.toolbar.add(self.tool_menu)
self.toolbar.add(self.tool_menu_chk)
self.toolbar.add(self.tool_menu_rd)
self.toolbar.add(self.tool_rd)
self.toolbar.add(self.tool_sep)
self.toolbar.add(self.tool_spacer)
self.pg.add(self.toolbar)
content = self.pg.render()
return content
def _toolbar_clicked(self, name, props):
menu = self.toolbar.clicked_item
if str(menu).find(':') > 0:
item = str(menu).split(':')[1]
if item.upper() == 'ADD':
new_btn = ToolbarButton('new_btn', 'New Button')
self.toolbar.add_item(new_btn)
if item.upper() == 'INSERT':
new_ins_btn = ToolbarButton('new_ins_btn', 'New Insert Button')
self.toolbar.insert_item(new_ins_btn, 'tool_btn')
if item.upper() == 'REMOVE':
self.toolbar.remove_item('new_ins_btn')
if item.upper() == 'HIDE':
self.toolbar.hide_item('toolbtn')
if item.upper() == 'SHOW':
self.toolbar.show_item('toolbtn')
if item.upper() == 'ENABLE':
self.toolbar.enable_item('toolbtn')
if item.upper() == 'DISABLE':
self.toolbar.disable_item('toolbtn')
def start_app():
p = W2UIPage()
app.add_url_rule('/', 'index', p.show_layout)
socketio.run(app, debug=True)
def start_web_view():
webview.create_window("My Application", "http://localhost:5000", resizable=True)
if __name__ == "__main__":
if os.uname().machine == 'aarch64':
start_app()
else:
app_proc = Process(target=start_app)
web_app = Process(target=start_web_view)
app_proc.start()
web_app.start()
app_proc.join()
web_app.join()
| [
"[email protected]"
]
| |
cf2901edbd6511a02d111b4d1c700a63f479a31e | d27a97334691bd4dcce72f772b382aacda5ab26f | /tests/rdf_album.py | fe438dcfc34744a41d358fd2a69623c7dfcc289e | []
| no_license | qood/vgmdb | e238c19d437eeb609466504d2a5d92416f936987 | 978f2245be746ea37faed2707e56c6002b8a0426 | refs/heads/master | 2021-01-24T01:11:25.427263 | 2015-08-05T05:41:50 | 2015-08-05T05:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,611 | py | # -*- coding: UTF-8 -*-
import os
import datetime
import unittest
import decimal
from ._rdf import TestRDF
from vgmdb.parsers import album
from vgmdb.config import BASE_URL
from urlparse import urljoin
class TestAlbumsRDF(TestRDF):
data_parser = lambda self,x: album.parse_page(x)
outputter_type = 'album'
def setUp(self):
pass
def run_ff8_tests(self, graph):
test_count_results = {
"select ?type where { <@base#subject> rdf:type mo:Release . }" : 1,
"select ?type where { <@base#subject> rdf:type schema:MusicAlbum . }" : 1,
"select ?type where { <@base#composition> rdf:type mo:Composition . }" : 1,
"select ?type where { <@base#composition> rdf:type schema:CreativeWork . }" : 1,
"select ?type where { <@base#musicalwork> rdf:type mo:MusicalWork . }" : 1,
"select ?type where { <@base#musicalwork> rdf:type schema:CreativeWork . }" : 1,
"select ?type where { <@base#performance> rdf:type mo:Performance . }" : 1,
"select ?type where { <@base#performance> rdf:type schema:Event . }" : 1,
"select ?person where { <@base#subject> schema:byArtist ?person . }" : 8,
"select ?person where { ?person foaf:made <@base#subject> . }" : 3,
"select ?composition where { <@base/artist/77#subject> foaf:made <@base#subject> . }" : 1,
"select ?composition where { <@base/artist/77#subject> foaf:made <@base#composition> . }" : 1,
"select ?person where { <@base#composition> mo:composer ?person . }" : 1,
"select ?person where { <@base#performance> mo:performer ?person . }" : 8,
"select ?person where { ?person foaf:made <@base#lyrics> . }" : 2,
"select ?record where { <@base#subject> mo:record ?record }" : 1,
"select ?track where { <@base#subject> mo:record ?record . ?record mo:track ?track . }" : 13,
"select ?track where { <@base#subject> mo:record ?record . ?record schema:track ?track . }" : 13,
"select ?track where { <@base#subject> mo:record ?record . ?track schema:inPlaylist ?record . }" : 13
}
test_first_result = {
"select ?expression where { <@base#subject> mo:publication_of ?expression . }" : "<@base#musicalexpression>",
"select ?album where { <@base#musicalexpression> mo:published_as ?album . }" : "<@base#subject>",
"select ?performance where { <@base#musicalexpression> mo:records ?performance . }" : "<@base#performance>",
"select ?expression where { <@base#performance> mo:recorded_as ?expression . }" : "<@base#musicalexpression>",
"select ?work where { <@base#performance> mo:performance_of ?work . }" : "<@base#musicalwork>",
"select ?performance where { <@base#musicalwork> mo:performed_in ?performance . }" : "<@base#performance>",
"select ?composed where { <@base#musicalwork> mo:composed_in ?composed . }" : "<@base#composition>",
"select ?work where { <@base#composition> mo:produced_work ?work . }" : "<@base#musicalwork>",
"select ?lyrics where { <@base#musicalwork> mo:lyrics ?lyrics . }" : "<@base#lyrics>",
"select ?about where { <@base#subject> schema:about ?about . } " : "<@baseproduct/189#subject>",
"select ?name where { <@base#subject> schema:about ?about . ?about schema:name ?name . filter(lang(?name)='en')} " : u'Final Fantasy VIII',
"select ?name where { <@base#subject> schema:about ?about . ?about schema:name ?name . filter(lang(?name)='ja')} " : u'ファイナルファンタジーVIII',
"select ?name where { ?album rdf:type mo:Release . ?album dcterms:title ?name . }" : u'FITHOS LUSEC WECOS VINOSEC: FINAL FANTASY VIII',
"select ?name where { ?album rdf:type mo:Release . ?album schema:name ?name . }" : u'FITHOS LUSEC WECOS VINOSEC: FINAL FANTASY VIII',
"select ?name where { ?album rdf:type mo:Performance . ?album schema:name ?name . }" : u'FITHOS LUSEC WECOS VINOSEC: FINAL FANTASY VIII',
"select ?name where { ?album rdf:type mo:Composition . ?album schema:name ?name . }" : u'FITHOS LUSEC WECOS VINOSEC: FINAL FANTASY VIII',
"select ?catalog where { <@base#subject> mo:catalogue_number ?catalog . }" : "SSCX-10037",
"select ?catalog where { <@base#subject> mo:other_release_of ?release . ?release mo:catalogue_number ?catalog . } order by desc(?catalog)" : "SQEX-10025",
"select ?date where { ?album rdf:type schema:MusicAlbum . ?album dcterms:created ?date . }" : datetime.date(1999,11,20),
"select ?name where { <@base#performance> mo:performer ?person . ?person foaf:name ?name . filter(lang(?name)='en')} order by ?name" : "Chie Sasakura",
"select ?name where { <@base#performance> schema:byArtist ?person . ?person foaf:name ?name . filter(lang(?name)='en')} order by ?name" : "Chie Sasakura",
"select ?name where { <@base#performance> schema:byArtist ?person . ?person rdf:type schema:Person . ?person foaf:name ?name . filter(lang(?name)='en')} order by ?name" : "Chie Sasakura",
"select ?name where { ?person mo:performed <@base#performance> . ?person foaf:name ?name . filter(lang(?name)='en')} order by ?name" : "Chie Sasakura",
"select ?records where { <@base#subject> mo:record_count ?records . }" : 1,
"select ?tracks where { <@base#subject> mo:record ?record . ?record mo:track_count ?tracks . }" : 13,
"select ?length where { <@base#subject> mo:record ?record . ?record mo:track ?track . ?track mo:track_number \"1\"^^xsd:integer . ?track schema:duration ?length . }" : "PT3:09",
"select ?length where { <@base#subject> mo:record ?record . ?record schema:duration ?length . }" : "PT64:16",
"select ?name where { <@base#subject> mo:record ?record . ?record mo:track ?track . ?track mo:track_number \"1\"^^xsd:integer . ?track schema:name ?name . filter(lang(?name)='en')}" : "Liberi Fatali",
"select ?name where { <@base#subject> mo:record ?record . ?record mo:track ?track . ?track mo:track_number \"1\"^^xsd:integer . ?track dcterms:title ?name . filter(lang(?name)='en')}" : "Liberi Fatali",
"select ?publisher where { <@base#subject> mo:publisher ?publisher . }" : "<@baseorg/54#subject>",
"select ?name where { <@base#subject> schema:publisher ?publisher . ?publisher foaf:name ?name . filter(lang(?name)='en') }" : "DigiCube",
"select ?composer where { <@base#composition> mo:composer ?composer . }" : "<@base/artist/77#subject>",
"select ?name where { <@base#composition> mo:composer ?composer . ?composer foaf:name ?name . filter(lang(?name)='en') }" : "Nobuo Uematsu",
"select ?rating where { <@base#subject> schema:aggregateRating ?agg . ?agg schema:ratingValue ?rating . }" : decimal.Decimal("4.47"),
"select ?rating where { <@base#subject> schema:aggregateRating ?agg . ?agg schema:ratingCount ?rating . }" : 43,
"select ?rating where { <@base#subject> schema:aggregateRating ?agg . ?agg schema:bestRating ?rating . }" : 5,
"select ?cover where { <@base#subject> foaf:depiction ?cover . ?cover a foaf:Image }" : "<http://vgmdb.net/db/assets/covers/7/9/79-1190730814.jpg>",
"select ?cover where { <@base#subject> schema:image ?cover . ?cover a schema:ImageObject }" : "<http://vgmdb.net/db/assets/covers/7/9/79-1190730814.jpg>",
"select ?cover where { ?cover foaf:depicts <@base#subject> . }" : "<http://vgmdb.net/db/assets/covers/7/9/79-1190730814.jpg>",
"select ?cover where { ?cover schema:about <@base#subject> . }" : "<http://vgmdb.net/db/assets/covers/7/9/79-1190730814.jpg>",
"select ?thumb where { <@base#subject> foaf:depiction ?cover . ?cover foaf:thumbnail ?thumb . ?thumb a foaf:Image }" : "<http://vgmdb.net/db/assets/covers-medium/7/9/79-1190730814.jpg>",
"select ?thumb where { <@base#subject> schema:image ?cover . ?cover schema:thumbnailUrl ?thumb . ?thumb a schema:ImageObject }" : "<http://vgmdb.net/db/assets/covers-medium/7/9/79-1190730814.jpg>"
}
self.run_tests(graph, test_count_results, test_first_result)
def test_ff8_rdfa(self):
graph = self.load_rdfa_data('album_ff8.html')
self.run_ff8_tests(graph)
def test_ff8_rdf(self):
graph = self.load_rdf_data('album_ff8.html')
self.run_ff8_tests(graph)
def run_bootleg_tests(self, graph):
test_count_results = {
}
test_first_result = {
"select ?catalog where { <@base#subject> mo:catalogue_number ?catalog . } order by desc(?catalog)" : "GAME-119",
"select ?catalog where { <@base#subject> mo:other_release_of ?release . ?release mo:catalogue_number ?catalog . } order by desc(?catalog)" : "N30D-021"
}
self.run_tests(graph, test_count_results, test_first_result)
def test_bootleg_rdfa(self):
graph = self.load_rdfa_data('album_bootleg.html')
self.run_bootleg_tests(graph)
def test_bootleg_rdf(self):
graph = self.load_rdf_data('album_bootleg.html')
self.run_bootleg_tests(graph)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
23ee2ea3fb54a9d1d459ca0edb986191ba823dca | 3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2 | /src/Week9/Efficiency/Sequencing.py | 5cd59f4ad5c2b4f90a8180536091d1c58517304a | []
| no_license | theguyoverthere/CMU15-112-Spring17 | b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8 | b8287092b14e82d2a3aeac6c27bffbc95382eb34 | refs/heads/master | 2021-04-27T08:52:45.237631 | 2018-10-02T15:38:18 | 2018-10-02T15:38:18 | 107,882,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # what is the total cost here?
L = [ 52, 83, 78, 9, 12, 4 ] # assume L is an arbitrary list of length N
L.sort() # This is O(NlogN)
L.sort(reverse=True) # This is O(NlogN)
L[0] -= 5 # This is O(1)
print(L.count(L[0]) + sum(L)) # This is O(N) + O(N)
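# Answer: the two sorts dominate, so the whole block is O(NlogN) + O(NlogN) + O(1) + O(N) + O(N) = O(NlogN).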
| [
"[email protected]"
]
| |
ee1620b5cccb60aa52d2725d3e10e369eb226f0f | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/suntimes/testcase/firstcases/testcase1_004.py | af83c435e940513a3fe6bb22542eaddd2ba85ec4 | []
| no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,328 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.forrestguice.suntimeswidget',
'appActivity' : 'com.forrestguice.suntimeswidget.SuntimesActivity',
'resetKeyboard' : True,
'androidCoverage' : 'com.forrestguice.suntimeswidget/com.forrestguice.suntimeswidget.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
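# getElememt/getElememtBack retry the UiSelector lookup several times (sleeping 1s between
# attempts); if it still fails they tap the screen at (50, 50) via adb and try one final lookup.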
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase004
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"moonrise\")", "new UiSelector().className(\"android.widget.TextView\").instance(9)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"2:50\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"sunrise\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"sunrise\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"sunrise\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"sunrise\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"sunset\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
swipe(driver, 0.5, 0.2, 0.5, 0.8)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"1_004\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'com.forrestguice.suntimeswidget'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
| [
"[email protected]"
]
| |
85d86a4f98edb5e00f54a8dadafebb34f0999ee8 | 60284a471e48e49e9b184305b08da38cbaf85c38 | /src/tests/ftest/datamover/posix_symlinks.py | fd9e6762fe298a9f0f9f8a40b9bffbe555cdb234 | [
"BSD-2-Clause-Patent",
"BSD-2-Clause"
]
| permissive | minmingzhu/daos | 734aa37c3cce1c4c9e777b151f44178eb2c4da1f | 9f095c63562db03e66028f78df0c37f1c05e2db5 | refs/heads/master | 2022-05-10T17:23:32.791914 | 2022-02-28T18:44:50 | 2022-02-28T18:44:50 | 228,773,662 | 1 | 0 | Apache-2.0 | 2019-12-18T06:30:39 | 2019-12-18T06:30:38 | null | UTF-8 | Python | false | false | 9,058 | py | #!/usr/bin/python
'''
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
from data_mover_test_base import DataMoverTestBase
from os.path import join
class DmvrPosixSymlinks(DataMoverTestBase):
# pylint: disable=too-many-ancestors
"""Test class for POSIX DataMover symlink validation
Test Class Description:
Tests POSIX DataMover symlink copying and dereferencing.
:avocado: recursive
"""
def test_dm_posix_symlinks(self):
"""JIRA id: DAOS-5998
Test Description:
Tests copying POSIX symlinks with dcp.
:avocado: tags=all,full_regression
:avocado: tags=datamover,dcp,dfuse
:avocado: tags=dm_posix_symlinks,dm_posix_symlinks_dcp
"""
self.run_dm_posix_symlinks("DCP")
def run_dm_posix_symlinks(self, tool):
"""
Use Cases:
1. Create pool
2. Create container
3. Create symlink structure:
- Links that point to files
- Links that point to directories
- Links that point to other links
- Links that point forward multiple levels
- Links that point backward one level
- Links that are transitive (link -> dir -> link)
4. Test copying between DAOS and POSIX
Args:
tool (str): The DataMover tool to run the test with.
Must be a valid tool in self.TOOLS.
NOTE:
Different symlink structures are created with the
create_links_* functions, where each structure tests
            some part of the use cases above.
"""
# Set the tool to use
self.set_tool(tool)
# Start dfuse to hold all pools/containers
self.start_dfuse(self.dfuse_hosts)
# Create 1 pool
pool1 = self.create_pool()
# Create a special container to hold UNS entries
uns_cont = self.create_cont(pool1)
# Test links that point forward
container1 = self.create_cont(pool1, True, pool1, uns_cont)
self.run_dm_posix_symlinks_fun(
pool1, container1, self.create_links_forward, "forward")
# Test links that point backward
container2 = self.create_cont(pool1, True, pool1, uns_cont)
self.run_dm_posix_symlinks_fun(
pool1, container2, self.create_links_backward, "backward")
# Test a mix of forward and backward links
container3 = self.create_cont(pool1, True, pool1, uns_cont)
self.run_dm_posix_symlinks_fun(
pool1, container3, self.create_links_mixed, "mixed")
def run_dm_posix_symlinks_fun(self, pool, cont, link_fun, link_desc):
"""
Tests copying symlinks with and without --dereference.
Args:
pool (TestPool): The pool to use
cont (TestContainer): The container for both src and dst
link_fun (str -> void): The function for creating the
symlink structure. A path is passed for the location.
link_desc (str): A description about the link_fun.
Used in logging.
"""
# Get the dereference param
do_deref = self.params.get(
"dereference", "/run/{}/*".format(self.tool.lower()))
# Use a common test_desc
test_desc = self.test_id + "({})".format(link_desc)
test_desc += " (dereference={})".format(str(do_deref))
self.log.info("Running %s", test_desc)
# Get a directory for POSIX
posix_test_path = self.new_posix_test_path()
# Save some paths and encode the type in the path for easier debugging
src_daos_dir = "/src_" + link_desc
src_daos_path = cont.path.value + src_daos_dir
src_posix_path = join(posix_test_path, "src_" + link_desc)
# Create the source links
link_fun(src_daos_path)
link_fun(src_posix_path)
if do_deref:
# Use POSIX cp to create a baseline for dereferencing
deref_baseline_path = join(posix_test_path, "baseline_" + link_desc)
self.execute_cmd("cp -r --dereference '{}' '{}'".format(
src_posix_path, deref_baseline_path))
diff_src = deref_baseline_path
else:
# Just compare against the original
diff_src = src_posix_path
# DAOS -> DAOS
dst_daos_dir = self.new_daos_test_path(create=False)
self.run_datamover(
test_desc + " (DAOS->DAOS)",
"DAOS", src_daos_dir, pool, cont,
"DAOS", dst_daos_dir, pool, cont)
self.run_diff(diff_src, cont.path.value + dst_daos_dir, do_deref)
# DAOS -> POSIX
dst_posix_path = self.new_posix_test_path(create=False)
self.run_datamover(
test_desc + " (DAOS->POSIX)",
"DAOS", src_daos_dir, pool, cont,
"POSIX", dst_posix_path)
self.run_diff(diff_src, dst_posix_path)
# POSIX -> DAOS
dst_daos_dir = self.new_daos_test_path(create=False)
self.run_datamover(
test_desc + " (POSIX->DAOS)",
"POSIX", src_posix_path, None, None,
"DAOS", dst_daos_dir, pool, cont)
self.run_diff(diff_src, cont.path.value + dst_daos_dir, do_deref)
def create_links_forward(self, path):
"""
Creates forward symlinks up to 3 levels deep.
Args:
path (str): The path to create the links in
Description:
- Links that point to files
- Links that point to directories
- Links that point to other links
- Links that point forward multiple levels deep
- Links that are transitive (link -> dir -> link)
"""
cmd_list = [
"mkdir -p " + path + "/dir1.1/dir1.2/dir1.3",
"pushd " + path,
# Level 4: one file
"echo 'file1.4' > dir1.1/dir1.2/dir1.3/file1.4",
# Level 3: one file, links to file and dir
"echo 'file1.3' > dir1.1/dir1.2/file1.3",
"ln -s file1.3 ./dir1.1/dir1.2/link1.3",
"ln -s dir1.3 ./dir1.1/dir1.2/link2.3",
# Level 2: links to level 3
"ln -s dir1.2/file1.3 ./dir1.1/link1.2",
"ln -s dir1.2/dir1.3 ./dir1.1/link2.2",
"ln -s dir1.2/link1.3 ./dir1.1/link3.2",
"ln -s dir1.2/link2.3 ./dir1.1/link4.2",
# Level 1: Links to level 2 and level 3
"ln -s dir1.1/dir1.2 ./link1.1",
"ln -s dir1.1/link1.2 ./link2.1",
"ln -s dir1.1/link2.2 ./link3.1",
"ln -s dir1.1/link3.2 ./link4.1",
"ln -s dir1.1/link4.2 ./link5.1",
"ln -s dir1.1/dir1.2/file1.3 ./link6.1",
"ln -s dir1.1/dir1.2/dir1.3 ./link7.1",
"ln -s dir1.1/dir1.2/link1.3 ./link8.1",
"ln -s dir1.1/dir1.2/link2.3 ./link9.1",
"popd"
]
self.execute_cmd_list(cmd_list)
def create_links_backward(self, path):
"""
Creates backward symlinks 1 level deep.
../../ is not yet supported.
Args:
path (str): The path to create the links in
Description:
- Links that point to files
- Links that point to links
- Links that point backward, one level up
"""
cmd_list = [
"mkdir -p " + path + "/dir1.1/dir1.2/",
"pushd " + path,
# Level 1: one file and two links
"echo 'file1.1' > ./file1.1",
"ln -s file1.1 ./link1.1",
"ln -s link1.1 ./link2.1",
# Level 2: links to level 1
"ln -s ../file1.1 ./dir1.1/link1.2",
"ln -s ../link1.1 ./dir1.1/link2.2",
"popd"
]
self.execute_cmd_list(cmd_list)
def create_links_mixed(self, path):
"""
Creates a mix of forward and backward links.
Level 1 -> Level 3 -> Level 2
Args:
path (str): The path to create the links in
Description:
- Links that point to files
- Links that point to links
- Links that traverse forward and backward
"""
cmd_list = [
"mkdir -p " + path + "/dir1.1/dir1.2/",
"pushd " + path,
# Level 1: link to level 3
"ln -s dir1.1/dir1.2/link1.3 ./link1.1",
# Level 3: one file, link to level 2
"echo 'file1.3' > ./dir1.1/dir1.2/file1.3",
"ln -s ../link1.2 ./dir1.1/dir1.2/link1.3",
# Level 2: link to level 3
"ln -s dir1.2/file1.3 ./dir1.1/link1.2",
"popd"
]
self.execute_cmd_list(cmd_list)
def execute_cmd_list(self, cmd_list):
"""Execute a list of commands, separated by &&.
Args:
cmd_list (list): A list of commands to execute.
"""
cmd = " &&\n".join(cmd_list)
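        # Joining with "&&" runs everything in a single shell invocation, so pushd/popd state
        # carries across the commands and the chain stops at the first failure.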
self.execute_cmd(cmd)
| [
"[email protected]"
]
| |
ad3b703785a4e63fadd304fe931f34553ff93077 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /tests/test_phl_cpu.py | 31e28bd39d8728b69f948db45d80ae5f98ade8d0 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
]
| permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 9,018 | py | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks.layers.filtering import PHLFilter
from tests.utils import skip_if_no_cpp_extension
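# Each case below is [description, sigmas, input, features, expected]; @parameterized.expand
# passes these positionally to test_cpu in PHLFilterTestCaseCpu further down.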
TEST_CASES = [
[
# Case Description
"2 batches, 1 dimensions, 1 channels, 1 features",
# Sigmas
[1, 0.2],
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Features
[
# Batch 0
[
# Channel 0
[1, 0.2, 0.5, 0, 1],
],
# Batch 1
[
# Channel 0
[0.5, 0, 1, 1, 1]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[0.468968, 0.364596, 0.4082, 0.332579, 0.468968]
],
# Batch 1
[
# Channel 0
[0.202473, 0.176527, 0.220995, 0.220995, 0.220995]
],
],
],
[
# Case Description
"1 batches, 1 dimensions, 3 channels, 1 features",
# Sigmas
[1],
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 0],
# Channel 1
[0, 0, 0, 0, 1],
# Channel 2
[0, 0, 1, 0, 0],
],
],
# Features
[
# Batch 0
[
# Channel 0
[1, 0.2, 0.5, 0.2, 1],
],
],
# Expected
[
# Batch 0
[
# Channel 0
[0.229572, 0.182884, 0.202637, 0.182884, 0.229572],
# Channel 1
[0.229572, 0.182884, 0.202637, 0.182884, 0.229572],
# Channel 2
[0.201235, 0.208194, 0.205409, 0.208194, 0.201235],
],
],
],
[
# Case Description
"1 batches, 2 dimensions, 1 channels, 3 features",
# Sigmas
[5, 3, 3],
# Input
[
# Batch 0
[
# Channel 0
[[9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 6, 6, 6], [9, 9, 6, 6, 6]]
],
],
# Features
[
# Batch 0
[
# Channel 0
[[9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 6, 6, 6], [9, 9, 6, 6, 6]],
# Channel 1
[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
# Channel 2
[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]],
],
],
# Expected
[
# Batch 0
[
# Channel 0
[
[7.696051, 7.427121, 1.191990, 1.156004, 1.157489],
[7.670297, 7.371155, 1.340232, 1.287871, 1.304018],
[7.639579, 7.365163, 1.473319, 1.397826, 1.416861],
[7.613517, 7.359183, 5.846500, 5.638952, 5.350098],
[7.598255, 7.458446, 5.912375, 5.583625, 5.233126],
]
],
],
],
[
# Case Description
"1 batches, 3 dimensions, 1 channels, 1 features",
# Sigmas
[5, 3, 3],
# Input
[
# Batch 0
[
# Channel 0
[
# Frame 0
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 1
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 2
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 3
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 4
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
]
],
],
# Features
[
# Batch 0
[
# Channel 0
[
# Frame 0
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 1
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 2
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 3
[[0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 4
[[0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[
# Frame 0
[
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
],
# Frame 1
[
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
],
# Frame 2
[
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
],
# Frame 3
[
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
],
# Frame 4
[
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
],
]
],
],
],
]
@skip_if_no_cpp_extension
class PHLFilterTestCaseCpu(unittest.TestCase):
@parameterized.expand(TEST_CASES)
def test_cpu(self, test_case_description, sigmas, input, features, expected):
# Create input tensors
input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cpu"))
feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cpu"))
# apply filter
output = PHLFilter.apply(input_tensor, feature_tensor, sigmas).cpu().numpy()
# Ensure result are as expected
np.testing.assert_allclose(output, expected, atol=1e-4)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
723faaf18a590d38c7b2d7ddbf82a2f78035fdb4 | bb6ebff7a7f6140903d37905c350954ff6599091 | /third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py | f65b682fea8a8d1e1f1c13f0fda30331da23efb3 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | PDi-Communication-Systems-Inc/lollipop_external_chromium_org | faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f | ccadf4e63dd34be157281f53fe213d09a8c66d2c | refs/heads/master | 2022-12-23T18:07:04.568931 | 2016-04-11T16:03:36 | 2016-04-11T16:03:36 | 53,677,925 | 0 | 1 | BSD-3-Clause | 2022-12-09T23:46:46 | 2016-03-11T15:49:07 | C++ | UTF-8 | Python | false | false | 10,886 | py | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.port import Port, Driver, DriverOutput
from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
# FIXME: remove the dependency on TestWebKitPort
from webkitpy.layout_tests.port.port_testcase import TestWebKitPort
from webkitpy.tool.mocktool import MockOptions
class DriverTest(unittest.TestCase):
def make_port(self):
port = Port(MockSystemHost(), 'test', MockOptions(configuration='Release'))
port._config.build_directory = lambda configuration: '/mock-checkout/out/' + configuration
return port
def _assert_wrapper(self, wrapper_string, expected_wrapper):
wrapper = Driver(self.make_port(), None, pixel_tests=False)._command_wrapper(wrapper_string)
self.assertEqual(wrapper, expected_wrapper)
def test_command_wrapper(self):
self._assert_wrapper(None, [])
self._assert_wrapper("valgrind", ["valgrind"])
# Validate that shlex works as expected.
command_with_spaces = "valgrind --smc-check=\"check with spaces!\" --foo"
expected_parse = ["valgrind", "--smc-check=check with spaces!", "--foo"]
self._assert_wrapper(command_with_spaces, expected_parse)
def test_test_to_uri(self):
port = self.make_port()
driver = Driver(port, None, pixel_tests=False)
self.assertEqual(driver.test_to_uri('foo/bar.html'), 'file://%s/foo/bar.html' % port.layout_tests_dir())
self.assertEqual(driver.test_to_uri('http/tests/foo.html'), 'http://127.0.0.1:8000/foo.html')
self.assertEqual(driver.test_to_uri('http/tests/ssl/bar.html'), 'https://127.0.0.1:8443/ssl/bar.html')
def test_uri_to_test(self):
port = self.make_port()
driver = Driver(port, None, pixel_tests=False)
self.assertEqual(driver.uri_to_test('file://%s/foo/bar.html' % port.layout_tests_dir()), 'foo/bar.html')
self.assertEqual(driver.uri_to_test('http://127.0.0.1:8000/foo.html'), 'http/tests/foo.html')
self.assertEqual(driver.uri_to_test('https://127.0.0.1:8443/ssl/bar.html'), 'http/tests/ssl/bar.html')
def test_read_block(self):
port = TestWebKitPort()
driver = Driver(port, 0, pixel_tests=False)
driver._server_process = MockServerProcess(lines=[
'ActualHash: foobar',
'Content-Type: my_type',
'Content-Transfer-Encoding: none',
"#EOF",
])
content_block = driver._read_block(0)
self.assertEqual(content_block.content, '')
self.assertEqual(content_block.content_type, 'my_type')
self.assertEqual(content_block.encoding, 'none')
self.assertEqual(content_block.content_hash, 'foobar')
driver._server_process = None
def test_read_binary_block(self):
port = TestWebKitPort()
driver = Driver(port, 0, pixel_tests=True)
driver._server_process = MockServerProcess(lines=[
'ActualHash: actual',
'ExpectedHash: expected',
'Content-Type: image/png',
'Content-Length: 9',
"12345678",
"#EOF",
])
content_block = driver._read_block(0)
self.assertEqual(content_block.content_type, 'image/png')
self.assertEqual(content_block.content_hash, 'actual')
self.assertEqual(content_block.content, '12345678\n')
self.assertEqual(content_block.decoded_content, '12345678\n')
driver._server_process = None
def test_read_base64_block(self):
port = TestWebKitPort()
driver = Driver(port, 0, pixel_tests=True)
driver._server_process = MockServerProcess(lines=[
'ActualHash: actual',
'ExpectedHash: expected',
'Content-Type: image/png',
'Content-Transfer-Encoding: base64',
'Content-Length: 12',
'MTIzNDU2NzgK#EOF',
])
content_block = driver._read_block(0)
self.assertEqual(content_block.content_type, 'image/png')
self.assertEqual(content_block.content_hash, 'actual')
self.assertEqual(content_block.encoding, 'base64')
self.assertEqual(content_block.content, 'MTIzNDU2NzgK')
self.assertEqual(content_block.decoded_content, '12345678\n')
def test_no_timeout(self):
port = TestWebKitPort()
port._config.build_directory = lambda configuration: '/mock-checkout/out/' + configuration
driver = Driver(port, 0, pixel_tests=True, no_timeout=True)
self.assertEqual(driver.cmd_line(True, []), ['/mock-checkout/out/Release/content_shell', '--no-timeout', '--dump-render-tree', '-'])
def test_check_for_driver_crash(self):
port = TestWebKitPort()
driver = Driver(port, 0, pixel_tests=True)
class FakeServerProcess(object):
def __init__(self, crashed):
self.crashed = crashed
def pid(self):
return 1234
def name(self):
return 'FakeServerProcess'
def has_crashed(self):
return self.crashed
def stop(self, timeout=0.0):
pass
def assert_crash(driver, error_line, crashed, name, pid, unresponsive=False, leaked=False):
self.assertEqual(driver._check_for_driver_crash(error_line), crashed)
self.assertEqual(driver._crashed_process_name, name)
self.assertEqual(driver._crashed_pid, pid)
self.assertEqual(driver._subprocess_was_unresponsive, unresponsive)
self.assertEqual(driver._check_for_leak(error_line), leaked)
driver.stop()
driver._server_process = FakeServerProcess(False)
assert_crash(driver, '', False, None, None)
driver._crashed_process_name = None
driver._crashed_pid = None
driver._server_process = FakeServerProcess(False)
driver._subprocess_was_unresponsive = False
driver._leaked = False
assert_crash(driver, '#CRASHED\n', True, 'FakeServerProcess', 1234)
driver._crashed_process_name = None
driver._crashed_pid = None
driver._server_process = FakeServerProcess(False)
driver._subprocess_was_unresponsive = False
driver._leaked = False
assert_crash(driver, '#CRASHED - WebProcess\n', True, 'WebProcess', None)
driver._crashed_process_name = None
driver._crashed_pid = None
driver._server_process = FakeServerProcess(False)
driver._subprocess_was_unresponsive = False
driver._leaked = False
assert_crash(driver, '#CRASHED - WebProcess (pid 8675)\n', True, 'WebProcess', 8675)
driver._crashed_process_name = None
driver._crashed_pid = None
driver._server_process = FakeServerProcess(False)
driver._subprocess_was_unresponsive = False
driver._leaked = False
assert_crash(driver, '#PROCESS UNRESPONSIVE - WebProcess (pid 8675)\n', True, 'WebProcess', 8675, True)
driver._crashed_process_name = None
driver._crashed_pid = None
driver._server_process = FakeServerProcess(False)
driver._subprocess_was_unresponsive = False
driver._leaked = False
assert_crash(driver, '#CRASHED - renderer (pid 8675)\n', True, 'renderer', 8675)
driver._crashed_process_name = None
driver._crashed_pid = None
driver._server_process = FakeServerProcess(False)
driver._subprocess_was_unresponsive = False
driver._leaked = False
assert_crash(driver, '#LEAK - renderer pid 8675 ({"numberOfLiveDocuments":[2,3]})\n', False, None, None, False, True)
driver._crashed_process_name = None
driver._crashed_pid = None
driver._server_process = FakeServerProcess(True)
driver._subprocess_was_unresponsive = False
driver._leaked = False
assert_crash(driver, '', True, 'FakeServerProcess', 1234)
def test_creating_a_port_does_not_write_to_the_filesystem(self):
port = TestWebKitPort()
driver = Driver(port, 0, pixel_tests=True)
self.assertEqual(port._filesystem.written_files, {})
self.assertEqual(port._filesystem.last_tmpdir, None)
def test_stop_cleans_up_properly(self):
port = TestWebKitPort()
port._server_process_constructor = MockServerProcess
driver = Driver(port, 0, pixel_tests=True)
driver.start(True, [])
last_tmpdir = port._filesystem.last_tmpdir
self.assertNotEquals(last_tmpdir, None)
driver.stop()
self.assertFalse(port._filesystem.isdir(last_tmpdir))
def test_two_starts_cleans_up_properly(self):
port = TestWebKitPort()
port._server_process_constructor = MockServerProcess
driver = Driver(port, 0, pixel_tests=True)
driver.start(True, [])
last_tmpdir = port._filesystem.last_tmpdir
driver._start(True, [])
self.assertFalse(port._filesystem.isdir(last_tmpdir))
def test_start_actually_starts(self):
port = TestWebKitPort()
port._server_process_constructor = MockServerProcess
driver = Driver(port, 0, pixel_tests=True)
driver.start(True, [])
self.assertTrue(driver._server_process.started)
| [
"[email protected]"
]
| |
9dbc5aad569ad45d58831448aa34a51bc8258984 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02996/s612893539.py | 7bab9fefb9759e4aca7500b4bfc54fe21ec5e098 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | import sys
import math
import itertools
import bisect
from copy import copy
from collections import deque,Counter
from decimal import Decimal
def s(): return input()
def i(): return int(input())
def S(): return input().split()
def I(): return map(int,input().split())
def L(): return list(input().split())
def l(): return list(map(int,input().split()))
def lcm(a,b): return a*b//math.gcd(a,b)
sys.setrecursionlimit(10 ** 9)
mod = 10**9+7
S = i()
time = []
for i in range(S):
a = l()
a.reverse()
time.append(a)
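# Greedy feasibility check: each input pair is reversed to [deadline, duration] and sorted by
# deadline; durations are accumulated in that order and "No" is printed as soon as a deadline is missed.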
time.sort()
pl = 0
for i in range(S):
pl += time[i][1]
if pl > time[i][0]:
print("No")
sys.exit()
print("Yes")
| [
"[email protected]"
]
| |
2146132029e154c9162b74995f0cf34f0ef3342e | 60654caf2633613021470d0285817343f76223e5 | /daily_catch/public_update/config.py | 566a37dc4796f6f4c390e00778aea0555a926b77 | []
| no_license | whoiskx/com_code | 79460ccee973d1dfe770af3780c273e4a0f466c9 | 388b5a055393ee7768cc8525c0484f19c3f97193 | refs/heads/master | 2020-04-09T23:14:28.228729 | 2018-12-06T07:10:25 | 2018-12-06T07:10:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # -*- coding: utf-8 -*-
import os
read_ver_url = 'http://dispatch.yunrunyuqing.com:38082/resources/sourceVersion/weixin/version.txt'
download_url = 'http://dispatch.yunrunyuqing.com:38082/resources/sourceVersion/weixin/public_update.zip'
base_path = os.path.dirname(os.path.abspath(__file__))
core_spider_path = os.path.join(base_path, 'public_update')
core_zip_path = os.path.join(core_spider_path, 'public_update.zip')
version_txt_path = os.path.join(core_spider_path, 'version.txt')
spider_path = os.path.join(core_spider_path, 'daily_collect')
run_path = os.path.join(spider_path, 'daily_collect.py')
kill_path = 'daily_collect.py'
| [
"[email protected]"
]
| |
0b313513a4e40c31df181c98f2e15203095458e5 | 9bfd93b93531c7d66335fffded2d00db0c1f8935 | /blog_censurfridns_dk/blog/translation.py | 9e8157edd7537f26fe16f55c391113b0d9039730 | []
| no_license | mortensteenrasmussen/blog.censurfridns.dk | 7d5da3961b6abf4124fddba7b1fdf5a4fc014c2c | 53939dee90ad5028256aace4c876d38695ec9e07 | refs/heads/master | 2021-01-14T14:23:17.443442 | 2016-08-29T20:11:22 | 2016-08-29T20:11:22 | 65,412,684 | 0 | 0 | null | 2016-08-10T20:03:31 | 2016-08-10T20:03:31 | null | UTF-8 | Python | false | false | 412 | py | from modeltranslation.translator import register, TranslationOptions
from .models import BlogPost
from taggit.models import Tag
@register(BlogPost)
class BlogPostTranslationOptions(TranslationOptions):
fields = ('title', 'body', 'slug')
required_languages = ('en', 'da')
@register(Tag)
class TaggitTranslations(TranslationOptions):
fields = ('name','slug')
required_languages = ('en', 'da')
| [
"[email protected]"
]
| |
60de944ffe3715da94961884dba29a2e0af82137 | 2937d60b7f5259b4899ba5af08146bd874529a67 | /Assignment 5 q4.py | d9776a0e669e961e49153c7ebd3133b4fe52a833 | []
| no_license | gourav47/Let-us-learn-python | 9a2302265cb6c47e74863359c79eef5a3078358a | b324f2487de65b2f073b54c8379c1b9e9aa36298 | refs/heads/master | 2021-06-27T03:33:27.483992 | 2021-01-07T12:26:16 | 2021-01-07T12:26:16 | 204,323,390 | 1 | 1 | null | 2020-07-19T14:25:12 | 2019-08-25T16:53:56 | Python | UTF-8 | Python | false | false | 212 | py | '''python script to print square of numbers from a to b'''
a=int(input("Enter the first number: "))
b=int(input("Enter second number: "))
if a>b:
a,b=b,a
for i in range(a,b+1):
print(i**2,end=' ')
| [
"[email protected]"
]
| |
e43dc1c4687a848c69bf66702ed69a995ab3c08f | 1291b1974904918accf29f87c2d303e679297e03 | /038comp2dates.py | 8f57b80d77fd8950587bb9796dea50f4a02bffb6 | []
| no_license | utkarsh192000/PythonAdvance | 853a594ed678c462d9604a8bd6300aa0449dd610 | e2e5f3059050b94f2e89ba681ddd6e3f01091b97 | refs/heads/master | 2023-04-05T09:42:34.942443 | 2021-04-04T19:07:18 | 2021-04-04T19:07:18 | 354,625,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py |
from datetime import *
d1=date(2021,3,23)
d2=date(2010,3,23)
print(d1<d2)
print(d1>d2)
print(d1==d2) | [
"[email protected]"
]
| |
d7c0d7693181b79f9f44abbeaedd2d8e7988f5ff | caa14cf78fe15affc96acc3de6f4fb1b54bcdf70 | /sap/sap/saplib/tests/test_saputils.py | 6eec437d874842f6cbed599b9adb923e141e3f69 | []
| no_license | jesstherobot/Sycamore_FPGA | 2e3f0dea21482de87ea444506ae2af3f58b5a344 | d1096e15f07b17a8dcb2276e312c5ba3e0006632 | refs/heads/master | 2021-01-18T07:57:14.268157 | 2011-10-19T22:46:28 | 2011-10-19T22:46:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,689 | py | import unittest
import sys
import os
class Test (unittest.TestCase):
"""Unit test for saputils"""
def setUp(self):
os.environ["SAPLIB_BASE"] = sys.path[0] + "/saplib"
#print "SAPLIB_BASE: " + os.getenv("SAPLIB_BASE")
def test_create_dir(self):
"""create a directory"""
import saputils
result = saputils.create_dir("~/sandbox/projects")
self.assertEqual(result, True)
def test_remove_comments(self):
"""try and remove all comments from a buffer"""
import saputils
bufin = "not comment /*comment\n\n*/\n\n//comment\n\n/*\nabc\n*/soemthing//comment"
#print "input buffer:\n" + bufin
output_buffer = saputils.remove_comments(bufin)
    #print "output buffer:\n" + output_buffer
self.assertEqual(len(output_buffer) > 0, True)
def test_find_rtl_file_location(self):
"""give a filename that should be in the RTL"""
import saputils
result = saputils.find_rtl_file_location("simple_gpio.v")
#print "file location: " + result
try:
testfile = open(result)
result = True
testfile.close()
except:
result = False
self.assertEqual(result, True)
def test_resolve_linux_path(self):
"""given a filename with or without the ~ return a filename with the ~ expanded"""
import saputils
filename1 = "/filename1"
filename = saputils.resolve_linux_path(filename1)
#print "first test: " + filename
#if (filename == filename1):
# print "test1: they are equal!"
self.assertEqual(filename == "/filename1", True)
filename2 = "~/filename2"
filename = saputils.resolve_linux_path(filename2)
correct_result = os.path.expanduser("~") + "/filename2"
#print "second test: " + filename + " should equal to: " + correct_result
#if (correct_result == filename):
# print "test2: they are equal!"
self.assertEqual(correct_result == filename, True)
filename = filename.strip()
def test_read_slave_tags(self):
"""try and extrapolate all info from the slave file"""
import saputils
base_dir = os.getenv("SAPLIB_BASE")
filename = base_dir + "/hdl/rtl/wishbone/slave/simple_gpio/simple_gpio.v"
drt_keywords = [
"DRT_ID",
"DRT_FLAGS",
"DRT_SIZE"
]
tags = saputils.get_module_tags(filename, keywords = drt_keywords, debug = False)
io_types = [
"input",
"output",
"inout"
]
#
#for io in io_types:
# for port in tags["ports"][io].keys():
# print "Ports: " + port
self.assertEqual(True, True)
def test_read_slave_tags_with_params(self):
"""some verilog files have a paramter list"""
import saputils
base_dir = os.getenv("SAPLIB_BASE")
filename = base_dir + "/hdl/rtl/wishbone/slave/ddr/wb_ddr.v"
drt_keywords = [
"DRT_ID",
"DRT_FLAGS",
"DRT_SIZE"
]
tags = saputils.get_module_tags(filename, keywords = drt_keywords, debug = True)
io_types = [
"input",
"output",
"inout"
]
#
#for io in io_types:
# for port in tags["ports"][io].keys():
# print "Ports: " + port
print "\n\n\n\n\n\n"
print "module name: " + tags["module"]
print "\n\n\n\n\n\n"
self.assertEqual(True, True)
def test_read_hard_slave_tags(self):
"""try and extrapolate all info from the slave file"""
import saputils
base_dir = os.getenv("SAPLIB_BASE")
filename = base_dir + "/hdl/rtl/wishbone/slave/ddr/wb_ddr.v"
drt_keywords = [
"DRT_ID",
"DRT_FLAGS",
"DRT_SIZE"
]
tags = saputils.get_module_tags(filename, keywords = drt_keywords, debug = True)
io_types = [
"input",
"output",
"inout"
]
#
#for io in io_types:
# for port in tags["ports"][io].keys():
# print "Ports: " + port
self.assertEqual(True, True)
if __name__ == "__main__":
sys.path.append (sys.path[0] + "/../")
import saputils
unittest.main()
| [
"[email protected]"
]
| |
f7ecbb6e60d58f81414014d4eb23c770a0e6acd9 | c4a8e44b171bbfcce4773fbd5820be40d991afab | /dispatcher_sample.fcgi | 299c4ad0e1726445c0909f317429f2fd66a4824f | [
"MIT"
]
| permissive | sveetch/DjangoSveetchies | a2462c29839d60736077f647b3014396ce700f42 | 0fd4f23d601287dbfb5a93b4f9baa33481466a25 | refs/heads/master | 2021-01-01T20:48:08.824288 | 2013-03-10T12:14:56 | 2013-03-10T12:14:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | fcgi | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
FastCGI dispatcher for development environment
"""
import sys, os
sys.path.insert(0, '/home/django/py_libs') # An optional path where some Python libs are installed
sys.path.insert(0, '/home/django/gits/') # Path to the directory which contains 'DjangoSveetchies'
# Specify the temporary directory to use for Python Eggs
os.environ['PYTHON_EGG_CACHE'] = "/tmp"
# Set the DJANGO_SETTINGS_MODULE environment variable.
os.environ['DJANGO_SETTINGS_MODULE'] = "DjangoSveetchies.prod_settings"
from django.core.servers.fastcgi import runfastcgi
runfastcgi(method="threaded", daemonize="false")
| [
"[email protected]"
]
| |
524a64d718c9a87331dcd95f4b5511761a102a97 | 3e397609ebd59d50ed0f9928e6bd039030e35f9a | /contract_api/lambda_handler.py | 4757ef1411c05ce3dff3425d4a41156dd03276bb | []
| no_license | prashantramangupta/marketplace | d8f64462668f1bb15c37fd52c17236d7565e5ae5 | acae91d90ec8626bc79ae46168c37a4d8bbab46a | refs/heads/master | 2020-06-05T15:48:19.063615 | 2019-06-26T05:28:16 | 2019-06-26T05:28:16 | 159,120,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,511 | py | import json
import logging
import re
import traceback
from schema import Schema, And
from common.constant import NETWORKS
from common.repository import Repository
from mpe import MPE
from registry import Registry
NETWORKS_NAME = dict((NETWORKS[netId]['name'], netId) for netId in NETWORKS.keys())
db = dict((netId, Repository(net_id=netId)) for netId in NETWORKS.keys())
def request_handler(event, context):
print(event)
if 'path' not in event:
return get_response(400, "Bad Request")
try:
payload_dict = None
resp_dta = None
path = event['path'].lower()
stage = event['requestContext']['stage']
net_id = NETWORKS_NAME[stage]
if event['httpMethod'] == 'POST':
body = event['body']
if body is not None and len(body) > 0:
payload_dict = json.loads(body)
elif event['httpMethod'] == 'GET':
payload_dict = event.get('queryStringParameters')
else:
return get_response(400, "Bad Request")
if path in ["/service", "/feedback"] or path[0:4] == "/org" or path[0:5] == "/user":
obj_reg = Registry(obj_repo=db[net_id])
if "/org" == path:
resp_dta = obj_reg.get_all_org()
elif re.match("(\/service)[/]{0,1}$", path):
if payload_dict is None:
payload_dict = {}
resp_dta = obj_reg.get_all_srvcs(qry_param=payload_dict)
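            # e.g. GET /org/<org_id>/service/<service_id>/group -- org_id and service_id are
            # pulled back out of the path segments below (params[2] and params[4]).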
elif re.match("(\/org\/)[^\/]*(\/service\/)[^\/]*(\/group)[/]{0,1}$", path):
params = path.split("/")
org_id = params[2]
service_id = params[4]
resp_dta = obj_reg.get_group_info(org_id=org_id, service_id=service_id)
elif "/channels" == path:
obj_mpe = MPE(net_id=net_id, obj_repo=db[net_id])
resp_dta = obj_mpe.get_channels_by_user_address(payload_dict['user_address'],
payload_dict.get('org_id', None),
payload_dict.get('service_id', None))
elif re.match("(\/user\/)[^\/]*(\/feedback)[/]{0,1}$", path):
params = path.split("/")
user_address = params[2]
resp_dta = get_user_feedback(user_address=user_address, obj_reg=obj_reg)
elif "/feedback" == path:
resp_dta = set_user_feedback(payload_dict['feedback'], obj_reg=obj_reg, net_id=net_id)
else:
return get_response(400, "Invalid URL path.")
if resp_dta is None:
err_msg = {'status': 'failed', 'error': 'Bad Request', 'api': event['path'], 'payload': payload_dict}
response = get_response(500, err_msg)
else:
response = get_response(200, {"status": "success", "data": resp_dta})
except Exception as e:
err_msg = {"status": "failed", "error": repr(e), 'api': event['path'], 'payload': payload_dict}
response = get_response(500, err_msg)
traceback.print_exc()
return response
def check_for_blank(field):
if field is None or len(field) == 0:
return True
return False
def get_user_feedback(user_address, obj_reg):
if check_for_blank(user_address):
return []
return obj_reg.get_usr_feedbk(user_address)
def set_user_feedback(feedbk_info, obj_reg, net_id):
feedbk_recorded = False
schema = Schema([{'user_address': And(str),
'org_id': And(str),
'service_id': And(str),
'up_vote': bool,
'down_vote': bool,
'comment': And(str),
'signature': And(str)
}])
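    # feedbk_info is expected to match the schema above, e.g. (illustrative values only):
    # {"user_address": "..", "org_id": "..", "service_id": "..",
    #  "up_vote": True, "down_vote": False, "comment": "..", "signature": ".."}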
try:
feedback_data = schema.validate([feedbk_info])
feedbk_recorded = obj_reg.set_usr_feedbk(feedback_data[0], net_id=net_id)
except Exception as err:
print("Invalid Input ", err)
return None
if feedbk_recorded:
return []
return None
def get_response(status_code, message):
return {
'statusCode': status_code,
'body': json.dumps(message),
'headers': {
'Content-Type': 'application/json',
"X-Requested-With": '*',
"Access-Control-Allow-Headers": 'Access-Control-Allow-Origin, Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-requested-with',
"Access-Control-Allow-Origin": '*',
"Access-Control-Allow-Methods": 'GET,OPTIONS,POST'
}
}
| [
"[email protected]"
]
| |
ec204e589862d7db078962cf5fe0c41711f5cbcb | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Logit/trend_Lag1Trend/cycle_30/ar_12/test_artificial_32_Logit_Lag1Trend_30_12_20.py | b308b4cbc07f69e3c5a3e07412703d09b5786f7b | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 266 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 12); | [
"[email protected]"
]
| |
699bfc4d77e051f4b0b9b95cde59fbb62b5cf72d | 4edbeb3e2d3263897810a358d8c95854a468c3ca | /python3/version/python_version.py | dec52afee6ab14dce005f6ae31bb0e544017de89 | [
"MIT"
]
| permissive | jtraver/dev | f505d15d45b67a59d11306cc7252114c265f388b | 2197e3443c7619b856470558b737d85fe1f77a5a | refs/heads/master | 2023-08-06T02:17:58.601861 | 2023-08-01T16:58:44 | 2023-08-01T16:58:44 | 14,509,952 | 0 | 1 | MIT | 2020-10-14T18:32:48 | 2013-11-19T00:51:19 | Python | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python3
#!/usr/bin/python
import os
import platform
import sys
import aerospike
def main():
print("\nos")
print("os.name = %s" % str(os.name))
print("sys.platform = %s" % str(sys.platform))
print("platform.platform() = %s" % str(platform.platform()))
print("\npython")
print("sys.version = %s" % str(sys.version))
print("sys.version_info = %s" % str(sys.version_info))
print("sys.version_info[0] = %s" % str(sys.version_info[0]))
print("\naerospike")
try:
print("aerospike client version is %s" % str(aerospike.__version__))
except Exception as e:
print("e = %s" % str(e))
pass
main()
| [
"[email protected]"
]
| |
bffc4998a73a001af96ff4d89986c7f07ba844b4 | 65f94b2fe3794b6fd682e52c7f4047a737cae6c7 | /env/bin/symilar | f88c0874a96d9c0b5e2366b5b1482cc352a5092d | []
| no_license | udoyen/vgg-project-challenge | 47e7e0c5352437f3df00aff9ac055dbadaadebb5 | 76a005edec6e77f9467b67bda20002c58abef7a9 | refs/heads/master | 2022-10-04T14:42:46.267458 | 2020-02-11T10:47:22 | 2020-02-11T10:47:22 | 238,899,753 | 0 | 1 | null | 2022-09-16T18:17:10 | 2020-02-07T10:45:53 | Python | UTF-8 | Python | false | false | 276 | #!/home/george/Documents/vgg-docs/vgg-project-challenge/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
| [
"[email protected]"
]
| ||
bf1ede23da3e02e00b2cf1c77c17765bc71ab71a | b39d72ba5de9d4683041e6b4413f8483c817f821 | /GeneVisualization/ass1/Lib/site-packages/itk/itkAggregateLabelMapFilterPython.py | 0dbdcf699057bdee2b4e73071db281a38799bb72 | []
| no_license | ssalmaan/DataVisualization | d93a0afe1290e4ea46c3be5718d503c71a6f99a7 | eff072f11337f124681ce08742e1a092033680cc | refs/heads/master | 2021-03-13T05:40:23.679095 | 2020-03-11T21:37:45 | 2020-03-11T21:37:45 | 246,642,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,395 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3, 0, 0):
new_instancemethod = lambda func, inst, cls: _itkAggregateLabelMapFilterPython.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_itkAggregateLabelMapFilterPython', [dirname(__file__)])
except ImportError:
import _itkAggregateLabelMapFilterPython
return _itkAggregateLabelMapFilterPython
if fp is not None:
try:
_mod = imp.load_module('_itkAggregateLabelMapFilterPython', fp, pathname, description)
finally:
fp.close()
return _mod
_itkAggregateLabelMapFilterPython = swig_import_helper()
del swig_import_helper
else:
import _itkAggregateLabelMapFilterPython
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import itkInPlaceLabelMapFilterPython
import itkLabelMapFilterPython
import ITKLabelMapBasePython
import itkStatisticsLabelObjectPython
import itkPointPython
import itkFixedArrayPython
import pyBasePython
import vnl_vector_refPython
import vnl_vectorPython
import vnl_matrixPython
import stdcomplexPython
import itkVectorPython
import itkIndexPython
import itkOffsetPython
import itkSizePython
import itkMatrixPython
import itkCovariantVectorPython
import vnl_matrix_fixedPython
import itkAffineTransformPython
import itkMatrixOffsetTransformBasePython
import itkArray2DPython
import itkOptimizerParametersPython
import itkArrayPython
import ITKCommonBasePython
import itkVariableLengthVectorPython
import itkDiffusionTensor3DPython
import itkSymmetricSecondRankTensorPython
import itkTransformBasePython
import itkShapeLabelObjectPython
import itkImageRegionPython
import itkLabelObjectPython
import itkLabelObjectLinePython
import itkHistogramPython
import itkSamplePython
import itkImageSourcePython
import itkImageSourceCommonPython
import itkVectorImagePython
import itkImagePython
import itkRGBAPixelPython
import itkRGBPixelPython
import itkImageToImageFilterCommonPython
def itkAggregateLabelMapFilterLM3_New():
return itkAggregateLabelMapFilterLM3.New()
def itkAggregateLabelMapFilterLM2_New():
return itkAggregateLabelMapFilterLM2.New()
class itkAggregateLabelMapFilterLM2(itkInPlaceLabelMapFilterPython.itkInPlaceLabelMapFilterLM2):
"""Proxy of C++ itkAggregateLabelMapFilterLM2 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkAggregateLabelMapFilterLM2_Pointer":
"""__New_orig__() -> itkAggregateLabelMapFilterLM2_Pointer"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkAggregateLabelMapFilterLM2_Pointer":
"""Clone(itkAggregateLabelMapFilterLM2 self) -> itkAggregateLabelMapFilterLM2_Pointer"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2_Clone(self)
__swig_destroy__ = _itkAggregateLabelMapFilterPython.delete_itkAggregateLabelMapFilterLM2
def cast(obj: 'itkLightObject') -> "itkAggregateLabelMapFilterLM2 *":
"""cast(itkLightObject obj) -> itkAggregateLabelMapFilterLM2"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkAggregateLabelMapFilterLM2
Create a new object of the class itkAggregateLabelMapFilterLM2 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkAggregateLabelMapFilterLM2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkAggregateLabelMapFilterLM2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkAggregateLabelMapFilterLM2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkAggregateLabelMapFilterLM2.Clone = new_instancemethod(_itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2_Clone, None, itkAggregateLabelMapFilterLM2)
itkAggregateLabelMapFilterLM2_swigregister = _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2_swigregister
itkAggregateLabelMapFilterLM2_swigregister(itkAggregateLabelMapFilterLM2)
def itkAggregateLabelMapFilterLM2___New_orig__() -> "itkAggregateLabelMapFilterLM2_Pointer":
"""itkAggregateLabelMapFilterLM2___New_orig__() -> itkAggregateLabelMapFilterLM2_Pointer"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2___New_orig__()
def itkAggregateLabelMapFilterLM2_cast(obj: 'itkLightObject') -> "itkAggregateLabelMapFilterLM2 *":
"""itkAggregateLabelMapFilterLM2_cast(itkLightObject obj) -> itkAggregateLabelMapFilterLM2"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM2_cast(obj)
class itkAggregateLabelMapFilterLM3(itkInPlaceLabelMapFilterPython.itkInPlaceLabelMapFilterLM3):
"""Proxy of C++ itkAggregateLabelMapFilterLM3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkAggregateLabelMapFilterLM3_Pointer":
"""__New_orig__() -> itkAggregateLabelMapFilterLM3_Pointer"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkAggregateLabelMapFilterLM3_Pointer":
"""Clone(itkAggregateLabelMapFilterLM3 self) -> itkAggregateLabelMapFilterLM3_Pointer"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3_Clone(self)
__swig_destroy__ = _itkAggregateLabelMapFilterPython.delete_itkAggregateLabelMapFilterLM3
def cast(obj: 'itkLightObject') -> "itkAggregateLabelMapFilterLM3 *":
"""cast(itkLightObject obj) -> itkAggregateLabelMapFilterLM3"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkAggregateLabelMapFilterLM3
Create a new object of the class itkAggregateLabelMapFilterLM3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkAggregateLabelMapFilterLM3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkAggregateLabelMapFilterLM3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkAggregateLabelMapFilterLM3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkAggregateLabelMapFilterLM3.Clone = new_instancemethod(_itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3_Clone, None, itkAggregateLabelMapFilterLM3)
itkAggregateLabelMapFilterLM3_swigregister = _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3_swigregister
itkAggregateLabelMapFilterLM3_swigregister(itkAggregateLabelMapFilterLM3)
def itkAggregateLabelMapFilterLM3___New_orig__() -> "itkAggregateLabelMapFilterLM3_Pointer":
"""itkAggregateLabelMapFilterLM3___New_orig__() -> itkAggregateLabelMapFilterLM3_Pointer"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3___New_orig__()
def itkAggregateLabelMapFilterLM3_cast(obj: 'itkLightObject') -> "itkAggregateLabelMapFilterLM3 *":
"""itkAggregateLabelMapFilterLM3_cast(itkLightObject obj) -> itkAggregateLabelMapFilterLM3"""
return _itkAggregateLabelMapFilterPython.itkAggregateLabelMapFilterLM3_cast(obj)
def aggregate_label_map_filter(*args, **kwargs):
"""Procedural interface for AggregateLabelMapFilter"""
import itk
instance = itk.AggregateLabelMapFilter.New(*args, **kwargs)
return instance.__internal_call__()
def aggregate_label_map_filter_init_docstring():
import itk
import itkTemplate
if isinstance(itk.AggregateLabelMapFilter, itkTemplate.itkTemplate):
aggregate_label_map_filter.__doc__ = itk.AggregateLabelMapFilter.values()[0].__doc__
else:
aggregate_label_map_filter.__doc__ = itk.AggregateLabelMapFilter.__doc__
| [
"[email protected]"
]
| |
19b0f1f8a7c09dc649e0cad037b8f1d8ebb8b242 | 81579ecd0678d652bbb57ff97529631fcfb74b12 | /corehq/motech/openmrs/tests/test_repeater_helpers.py | 83282f2a7427ec1fa865d3dd1356587c444939b9 | [
"BSD-3-Clause"
]
| permissive | dungeonmaster51/commcare-hq | 64fece73671b03c1bca48cb9d1a58764d92796ea | 1c70ce416564efa496fb4ef6e9130c188aea0f40 | refs/heads/master | 2022-12-03T21:50:26.035495 | 2020-08-11T07:34:59 | 2020-08-11T07:34:59 | 279,546,551 | 1 | 0 | BSD-3-Clause | 2020-07-31T06:13:03 | 2020-07-14T09:51:32 | Python | UTF-8 | Python | false | false | 1,089 | py | from unittest import skip
from nose.tools import assert_regexp_matches
from corehq.motech.auth import BasicAuthManager
from corehq.motech.openmrs.repeater_helpers import generate_identifier
from corehq.motech.requests import Requests
DOMAIN = 'openmrs-test'
BASE_URL = 'https://demo.mybahmni.org/openmrs/'
USERNAME = 'superman'
PASSWORD = 'Admin123'
# Patient identifier type for use by the Bahmni Registration System
# https://demo.mybahmni.org/openmrs/admin/patients/patientIdentifierType.form?patientIdentifierTypeId=3
IDENTIFIER_TYPE = '81433852-3f10-11e4-adec-0800271c1b75'
@skip('Uses third-party web services')
def test_generate_identifier():
auth_manager = BasicAuthManager(USERNAME, PASSWORD)
requests = Requests(
DOMAIN,
BASE_URL,
verify=False, # demo.mybahmni.org uses a self-issued cert
auth_manager=auth_manager,
logger=dummy_logger,
)
identifier = generate_identifier(requests, IDENTIFIER_TYPE)
assert_regexp_matches(identifier, r'^BAH\d{6}$') # e.g. BAH203001
def dummy_logger(*args, **kwargs):
pass
| [
"[email protected]"
]
| |
2d9f94c5939c209e95cd90f452b218045cd65527 | 373c43096384a2ea7f351fdedc64312660a1c344 | /src/cli.py | f3ccd5fb4c85d42a6d92d16f6863a85c68bacb64 | [
"MIT"
]
| permissive | VanirLab/weever | 7ad69c76227ac0981b1dd0570e3dbae4dd67de21 | b602e90ddecb8e469a28e092da3ca7fec514e3dc | refs/heads/master | 2020-05-27T20:57:48.320430 | 2019-05-27T09:02:33 | 2019-05-27T09:02:33 | 188,788,722 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 49,577 | py | """
Implementation of weever's command line interface.
"""
import sys
import traceback
import argparse
import logging
import getpass
import typing as typ
from src.wrapper.bad_cluster import BadClusterWrapper
from src.wrapper.cluster_allocation import ClusterAllocation
from src.fat.fat_filesystem.fat_wrapper import create_fat
from src.fat.fat_filesystem.fattools import FATtools
from src.wrapper.file_slack import FileSlack
from src.metadata import Metadata
from src.wrapper.mft_slack import MftSlack
from src.wrapper.osd2 import OSD2
from src.wrapper.obso_faddr import FADDR
from src.wrapper.reserved_gdt_blocks import ReservedGDTBlocks
from src.wrapper.superblock_slack import SuperblockSlack
from src.wrapper.inode_padding import inodePadding
from src.wrapper.write_gen import write_gen
from src.wrapper.timestamp_hiding import timestampHiding
from src.wrapper.xfield_padding import xfieldPadding
LOGGER = logging.getLogger("cli")
def do_metadata(args: argparse.Namespace) -> None:
"""
handles metadata subcommand execution
:param args: argparse.Namespace
"""
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(args.metadata)
meta.info()
def do_fattools(args: argparse.Namespace, device: typ.BinaryIO) -> None:
"""
handles fattools subcommand execution
:param args: argparse.Namespace
:param device: stream of the filesystem
"""
fattool = FATtools(create_fat(device))
if args.fat:
fattool.list_fat()
elif args.info:
fattool.list_info()
elif args.list is not None:
fattool.list_directory(args.list)
def do_fileslack(args: argparse.Namespace, device: typ.BinaryIO) -> None:
"""
    handles fileslack subcommand execution
:param args: argparse.Namespace
:param device: stream of the filesystem
"""
if args.info:
slacker = FileSlack(device, Metadata(), args.dev)
slacker.info(args.destination)
if args.write:
if args.password is False:
slacker = FileSlack(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
slacker = FileSlack(device, Metadata(password=pw), args.dev)
if not args.file:
# write from stdin into fileslack
slacker.write(sys.stdin.buffer, args.destination)
else:
# write from files into fileslack
with open(args.file, 'rb') as fstream:
slacker.write(fstream, args.destination, args.file)
with open(args.metadata, 'wb+') as metadata_out:
slacker.metadata.write(metadata_out)
elif args.read:
# read file slack of a single hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
slacker = FileSlack(device, meta, args.dev)
slacker.read(sys.stdout.buffer)
elif args.outfile:
# read hidden data in fileslack into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
slacker = FileSlack(device, meta, args.dev)
slacker.read_into_file(args.outfile)
elif args.clear:
# clear fileslack
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
slacker = FileSlack(device, meta, args.dev)
slacker.clear()
def do_mftslack(args: argparse.Namespace, device: typ.BinaryIO) -> None:
"""
    handles mftslack subcommand execution
:param args: argparse.Namespace
:param device: stream of the filesystem
"""
if args.info:
slacker = MftSlack(device, Metadata(), args.dev)
slacker.info(args.offset, args.limit)
if args.write:
if args.password is False:
slacker = MftSlack(device, Metadata(), args.dev, args.domirr)
else:
print("Please enter password: ")
pw = getpass.getpass()
slacker = MftSlack(device, Metadata(password=pw), args.dev, args.domirr)
if not args.file:
# write from stdin into mftslack
slacker.write(sys.stdin.buffer, offset=args.offset)
else:
# write from files into mftslack
with open(args.file, 'rb') as fstream:
slacker.write(fstream, args.file, args.offset)
with open(args.metadata, 'wb+') as metadata_out:
slacker.metadata.write(metadata_out)
elif args.read:
# read file slack of a single hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
slacker = MftSlack(device, meta, args.dev)
slacker.read(sys.stdout.buffer)
elif args.outfile:
# read hidden data in fileslack into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
slacker = MftSlack(device, meta, args.dev)
slacker.read_into_file(args.outfile)
elif args.clear:
# clear fileslack
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
slacker = MftSlack(device, meta, args.dev)
slacker.clear()
def do_addcluster(args: argparse.Namespace, device: typ.BinaryIO) -> None:
"""
    handles addcluster subcommand execution
:param args: argparse.Namespace
:param device: stream of the filesystem
"""
if args.write:
if args.password is False:
allocator = ClusterAllocation(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
allocator = ClusterAllocation(device, Metadata(password=pw), args.dev)
if not args.file:
# write from stdin into additional clusters
allocator.write(sys.stdin.buffer, args.destination)
else:
# write from files into additional clusters
with open(args.file, 'rb') as fstream:
allocator.write(fstream, args.destination, args.file)
with open(args.metadata, 'wb+') as metadata_out:
allocator.metadata.write(metadata_out)
elif args.read:
# read file slack of a single hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
allocator = ClusterAllocation(device, meta, args.dev)
allocator.read(sys.stdout.buffer)
elif args.outfile:
# read hidden data from additional clusters into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
allocator = ClusterAllocation(device, meta, args.dev)
allocator.read_into_file(args.outfile)
elif args.clear:
# clear additional clusters
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
allocator = ClusterAllocation(device, meta, args.dev)
allocator.clear()
def do_badcluster(args: argparse.Namespace, device: typ.BinaryIO) -> None:
"""
    handles badcluster subcommand execution
:param args: argparse.Namespace
:param device: stream of the filesystem
"""
if args.write:
if args.password is False:
allocator = BadClusterWrapper(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
allocator = BadClusterWrapper(device, Metadata(password=pw), args.dev)
if not args.file:
# write from stdin into bad clusters
allocator.write(sys.stdin.buffer)
else:
# write from file into bad cluster
with open(args.file, 'rb') as fstream:
allocator.write(fstream, args.file)
with open(args.metadata, 'wb+') as metadata_out:
allocator.metadata.write(metadata_out)
elif args.read:
# read bad cluster to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
allocator = BadClusterWrapper(device, meta, args.dev)
allocator.read(sys.stdout.buffer)
elif args.outfile:
# read hidden data from bad cluster into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
allocator = BadClusterWrapper(device, meta, args.dev)
allocator.read_into_file(args.outfile)
elif args.clear:
# clear bad cluster
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
allocator = BadClusterWrapper(device, meta, args.dev)
allocator.clear()
def do_reserved_gdt_blocks(args: argparse.Namespace, device: typ.BinaryIO) -> None:
"""
handles reserved_gdt_blocks subcommand execution
:param args: argparse.Namespace
:param device: stream of the filesystem
"""
if args.write:
if args.password is False:
reserve = ReservedGDTBlocks(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
reserve = ReservedGDTBlocks(device, Metadata(password=pw), args.dev)
if not args.file:
# write from stdin into reserved GDT blocks
reserve.write(sys.stdin.buffer)
else:
# write from files into reserved GDT blocks
with open(args.file, 'rb') as fstream:
reserve.write(fstream, args.file)
with open(args.metadata, 'wb+') as metadata_out:
reserve.metadata.write(metadata_out)
elif args.read:
# read hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
reserve = ReservedGDTBlocks(device, meta, args.dev)
reserve.read(sys.stdout.buffer)
elif args.outfile:
# read hidden file into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
reserve = ReservedGDTBlocks(device, meta, args.dev)
reserve.read_into_file(args.outfile)
elif args.clear:
# clear reserved GDT blocks
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
reserve = ReservedGDTBlocks(device, meta, args.dev)
reserve.clear()
elif args.info:
# show info
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
reserve = ReservedGDTBlocks(device, meta, args.dev)
reserve.info()
def do_superblock_slack(args: argparse.Namespace, device: typ.BinaryIO) -> None:
"""
handles superblock_slack subcommand execution
:param args: argparse.Namespace
:param device: stream of the filesystem
"""
if args.write:
if args.password is False:
slack = SuperblockSlack(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
slack = SuperblockSlack(device, Metadata(password=pw), args.dev)
if not args.file:
# write from stdin into superblock slack
slack.write(sys.stdin.buffer)
else:
# write from files into superblock slack
with open(args.file, 'rb') as fstream:
slack.write(fstream, args.file)
with open(args.metadata, 'wb+') as metadata_out:
slack.metadata.write(metadata_out)
elif args.read:
# read hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
slack = SuperblockSlack(device, meta, args.dev)
slack.read(sys.stdout.buffer)
elif args.outfile:
# read hidden file into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
slack = SuperblockSlack(device, meta, args.dev)
slack.read_into_file(args.outfile)
elif args.clear:
# clear superblock slack
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
slack = SuperblockSlack(device, meta, args.dev)
slack.clear()
elif args.info:
# show info
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
slack = SuperblockSlack(device, meta, args.dev)
slack.info()
def do_osd2(args: argparse.Namespace, device: typ.BinaryIO) -> None:
"""
handles osd2 subcommand execution
:param args: argparse.Namespace
:param device: stream of the filesystem
"""
if args.write:
if args.password is False:
osd2 = OSD2(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
osd2 = OSD2(device, Metadata(password=pw), args.dev)
if not args.file:
# write from stdin into osd2 fields
osd2.write(sys.stdin.buffer)
else:
# write from files into osd2 fields
with open(args.file, 'rb') as fstream:
osd2.write(fstream, args.file)
with open(args.metadata, 'wb+') as metadata_out:
osd2.metadata.write(metadata_out)
elif args.read:
# read hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
osd2 = OSD2(device, meta, args.dev)
osd2.read(sys.stdout.buffer)
elif args.outfile:
# read hidden file into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
osd2 = OSD2(device, meta, args.dev)
osd2.read_into_file(args.outfile)
elif args.clear:
# clear osd2 fields
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
osd2 = OSD2(device, meta, args.dev)
osd2.clear()
elif args.info:
# show info
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
osd2 = OSD2(device, meta, args.dev)
osd2.info()
def do_obso_faddr(args: argparse.Namespace, device: typ.BinaryIO) -> None:
"""
handles obso_faddr subcommand execution
:param args: argparse.Namespace
:param device: stream of the filesystem
"""
if args.write:
if args.password is False:
faddr = FADDR(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
faddr = FADDR(device, Metadata(password=pw), args.dev)
if not args.file:
# write from stdin into faddr fields
faddr.write(sys.stdin.buffer)
else:
# write from files into faddr fields
with open(args.file, 'rb') as fstream:
faddr.write(fstream, args.file)
with open(args.metadata, 'wb+') as metadata_out:
faddr.metadata.write(metadata_out)
elif args.read:
# read hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
faddr = FADDR(device, meta, args.dev)
faddr.read(sys.stdout.buffer)
elif args.outfile:
# read hidden file into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
faddr = FADDR(device, meta, args.dev)
faddr.read_into_file(args.outfile)
elif args.clear:
# clear faddr fields
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
faddr = FADDR(device, meta, args.dev)
faddr.clear()
elif args.info:
# show info
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
faddr = FADDR(device, meta, args.dev)
faddr.info()
def do_inode_padding(args: argparse.Namespace, device: typ.BinaryIO) -> None:
if args.write:
if args.password is False:
ipad = inodePadding(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
ipad = inodePadding(device, Metadata(password=pw), args.dev)
if not args.file:
ipad.write(sys.stdin.buffer)
else:
with open(args.file, 'rb') as fstream:
ipad.write(fstream, args.file)
with open(args.metadata, 'wb+') as metadata_out:
ipad.metadata.write(metadata_out)
elif args.read:
# read hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
ipad = inodePadding(device, meta, args.dev)
ipad.read(sys.stdout.buffer)
elif args.outfile:
# read hidden file into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
ipad = inodePadding(device, meta, args.dev)
ipad.read_into_file(args.outfile)
elif args.clear:
# clear faddr fields
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
ipad = inodePadding(device, meta, args.dev)
ipad.clear()
def do_write_gen(args: argparse.Namespace, device: typ.BinaryIO) -> None:
if args.write:
if args.password is False:
wgen = write_gen(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
wgen = write_gen(device, Metadata(password=pw), args.dev)
if not args.file:
wgen.write(sys.stdin.buffer)
else:
with open(args.file, 'rb') as fstream:
wgen.write(fstream, args.file)
with open(args.metadata, 'wb+') as metadata_out:
wgen.metadata.write(metadata_out)
elif args.read:
# read hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
wgen = write_gen(device, meta, args.dev)
wgen.read(sys.stdout.buffer)
elif args.outfile:
# read hidden file into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password = pw)
meta.read(metadata_file)
wgen = write_gen(device, meta, args.dev)
wgen.read_into_file(args.outfile)
elif args.clear:
# clear faddr fields
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
wgen = write_gen(device, meta, args.dev)
wgen.clear()
def do_timestamp_hiding(args: argparse.Namespace, device: typ.BinaryIO) -> None:
if args.write:
if args.password is False:
timestamp = timestampHiding(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
timestamp = timestampHiding(device, Metadata(password=pw), args.dev)
if not args.file:
timestamp.write(sys.stdin.buffer)
else:
with open(args.file, 'rb') as fstream:
timestamp.write(fstream, args.file)
with open(args.metadata, 'wb+') as metadata_out:
timestamp.metadata.write(metadata_out)
elif args.read:
# read hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
timestamp = timestampHiding(device, meta, args.dev)
timestamp.read(sys.stdout.buffer)
elif args.outfile:
# read hidden file into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
timestamp = timestampHiding(device, meta, args.dev)
timestamp.read_into_file(args.outfile)
elif args.clear:
# clear faddr fields
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
timestamp = timestampHiding(device, meta, args.dev)
timestamp.clear()
def do_xfield_padding(args: argparse.Namespace, device: typ.BinaryIO) -> None:
if args.write:
if args.password is False:
xfield = xfieldPadding(device, Metadata(), args.dev)
else:
print("Please enter password: ")
pw = getpass.getpass()
xfield = xfieldPadding(device, Metadata(password=pw), args.dev)
if not args.file:
xfield.write(sys.stdin.buffer)
else:
with open(args.file, 'rb') as fstream:
xfield.write(fstream, args.file)
with open(args.metadata, 'wb+') as metadata_out:
xfield.metadata.write(metadata_out)
elif args.read:
# read hidden file to stdout
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
xfield = xfieldPadding(device, meta, args.dev)
xfield.read(sys.stdout.buffer)
elif args.outfile:
# read hidden file into outfile
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
xfield = xfieldPadding(device, meta, args.dev)
xfield.read_into_file(args.outfile)
elif args.clear:
# clear faddr fields
with open(args.metadata, 'rb') as metadata_file:
if args.password is False:
meta = Metadata()
else:
print("Please enter password: ")
pw = getpass.getpass()
meta = Metadata(password=pw)
meta.read(metadata_file)
xfield = xfieldPadding(device, meta, args.dev)
xfield.clear()
def build_parser() -> argparse.ArgumentParser:
"""
Get the cli parser
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(description='Toolkit for filesystem based data hiding techniques.')
# TODO: Maybe this option should be required for hiding technique
    # subcommand but not for metadata... needs more thoughts than I
# currently have
parser.set_defaults(which='no_arguments')
parser.add_argument('-d', '--device', dest='dev', required=False, help='Path to filesystem')
parser.add_argument('-p', '--password', dest='password', action='store_true', required=False, help='Password for encryption of metadata')
# TODO Maybe we should provide a more fine grained option to choose between different log levels
parser.add_argument('--verbose', '-v', action='count', help="Increase verbosity. Use it multiple times to increase verbosity further.")
subparsers = parser.add_subparsers(help='Hiding techniques sub-commands')
# FAT Tools
fatt = subparsers.add_parser('fattools', help='List statistics about FAT filesystem')
fatt.set_defaults(which='fattools')
fatt.add_argument('-l', '--ls', dest='list', type=int, metavar='CLUSTER_ID', help='List files under cluster id. Use 0 for root directory')
fatt.add_argument('-f', '--fat', dest='fat', action='store_true', help='List content of FAT')
fatt.add_argument('-i', '--info', dest='info', action='store_true', help='Show some information about the filesystem')
# Metadata info
metadata = subparsers.add_parser('metadata', help='list information about a metadata file')
metadata.set_defaults(which='metadata')
metadata.add_argument('-m', '--metadata', dest='metadata', type=argparse.FileType('rb'), help="filepath to metadata file")
# FileSlack
fileslack = subparsers.add_parser('fileslack', help='Operate on file slack')
fileslack.set_defaults(which='fileslack')
fileslack.add_argument('-d', '--dest', dest='destination', action='append', required=False, help='absolute path to file or directory on filesystem, directories will be parsed recursively')
fileslack.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
fileslack.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from slackspace to stdout')
fileslack.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from slackspace to OUTFILE')
fileslack.add_argument('-w', '--write', dest='write', action='store_true', help='write to slackspace')
fileslack.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear slackspace')
fileslack.add_argument('-i', '--info', dest='info', action='store_true', help='print file slack information of given files')
fileslack.add_argument('file', metavar='FILE', nargs='?', help="File to write into slack space, if nothing provided, use stdin")
# MftSlack
mftslack = subparsers.add_parser('mftslack', help='Operate on mft slack')
mftslack.set_defaults(which='mftslack')
mftslack.add_argument('-s', '--seek', dest='offset', default=0, type=int, required=False, help='sector offset to the start of the first mft entry to be used when hiding data. To avoid overwriting data use the "Next position" provided by the last execution of this module.')
mftslack.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
mftslack.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from slackspace to stdout')
mftslack.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from slackspace to OUTFILE')
mftslack.add_argument('-w', '--write', dest='write', action='store_true', help='write to slackspace')
mftslack.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear slackspace')
mftslack.add_argument('-d', '--domirr', dest='domirr', action='store_true', help='write copy of data to $MFTMirr. Avoids detection with chkdsk')
mftslack.add_argument('-i', '--info', dest='info', action='store_true', help='print mft slack information of entries in limit')
mftslack.add_argument('-l', '--limit', dest='limit', default=-1, type=int, required=False, help='limit the amount of mft entries to print information for when using the "--info" switch')
mftslack.add_argument('file', metavar='FILE', nargs='?', help="File to write into slack space, if nothing provided, use stdin")
# Additional Cluster Allocation
addcluster = subparsers.add_parser('addcluster', help='Allocate more clusters for a file')
addcluster.set_defaults(which='addcluster')
addcluster.add_argument('-d', '--dest', dest='destination', required=False, help='absolute path to file or directory on filesystem')
addcluster.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
addcluster.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from allocated clusters to stdout')
addcluster.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from allocated clusters to OUTFILE')
addcluster.add_argument('-w', '--write', dest='write', action='store_true', help='write to additional allocated clusters')
addcluster.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear allocated clusters')
addcluster.add_argument('file', metavar='FILE', nargs='?', help="File to write into additionally allocated clusters, if nothing provided, use stdin")
    # Bad Cluster Allocation
    badcluster = subparsers.add_parser('badcluster', help='Hide data in clusters marked as bad')
    badcluster.set_defaults(which='badcluster')
    badcluster.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
    badcluster.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from bad clusters to stdout')
    badcluster.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from bad clusters to OUTFILE')
    badcluster.add_argument('-w', '--write', dest='write', action='store_true', help='write to clusters marked as bad')
    badcluster.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear bad clusters')
    badcluster.add_argument('file', metavar='FILE', nargs='?', help="File to write into clusters marked as bad, if nothing provided, use stdin")
# Reserved GDT blocks
reserved_gdt_blocks = subparsers.add_parser('reserved_gdt_blocks', help='hide data in reserved GDT blocks')
reserved_gdt_blocks.set_defaults(which='reserved_gdt_blocks')
reserved_gdt_blocks.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
reserved_gdt_blocks.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from reserved GDT blocks to stdout')
reserved_gdt_blocks.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from reserved GDT blocks to OUTFILE')
reserved_gdt_blocks.add_argument('-w', '--write', dest='write', action='store_true', help='write to reserved GDT blocks')
reserved_gdt_blocks.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear reserved GDT blocks')
    reserved_gdt_blocks.add_argument('-i', '--info', dest='info', action='store_true', help='show information about reserved GDT blocks')
reserved_gdt_blocks.add_argument('file', metavar='FILE', nargs='?', help="File to write into reserved GDT blocks, if nothing provided, use stdin")
# Superblock slack
superblock_slack = subparsers.add_parser('superblock_slack', help='hide data in superblock slack')
superblock_slack.set_defaults(which='superblock_slack')
superblock_slack.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
superblock_slack.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from superblock slack to stdout')
superblock_slack.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from superblock slack to OUTFILE')
superblock_slack.add_argument('-w', '--write', dest='write', action='store_true', help='write to superblock slack')
superblock_slack.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear superblock slack')
superblock_slack.add_argument('-i', '--info', dest='info', action='store_true', help='show information about superblock')
superblock_slack.add_argument('file', metavar='FILE', nargs='?', help="File to write into superblock slack, if nothing provided, use stdin")
# OSD2
osd2 = subparsers.add_parser('osd2', help='hide data in osd2 fields of inodes')
osd2.set_defaults(which='osd2')
osd2.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
osd2.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from osd2 fields to stdout')
osd2.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from osd2 fields to OUTFILE')
osd2.add_argument('-w', '--write', dest='write', action='store_true', help='write to osd2 fields')
osd2.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear osd2 fields')
osd2.add_argument('-i', '--info', dest='info', action='store_true', help='show information about osd2')
osd2.add_argument('file', metavar='FILE', nargs='?', help="File to write into osd2 fields, if nothing provided, use stdin")
# obso_faddr
obso_faddr = subparsers.add_parser('obso_faddr', help='hide data in obso_faddr fields of inodes')
obso_faddr.set_defaults(which='obso_faddr')
obso_faddr.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
obso_faddr.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from obso_faddr fields to stdout')
obso_faddr.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from obso_faddr fields to OUTFILE')
obso_faddr.add_argument('-w', '--write', dest='write', action='store_true', help='write to obso_faddr fields')
obso_faddr.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear obso_faddr fields')
obso_faddr.add_argument('-i', '--info', dest='info', action='store_true', help='show information about obso_faddr')
obso_faddr.add_argument('file', metavar='FILE', nargs='?', help="File to write into obso_faddr fields, if nothing provided, use stdin")
# inode Padding
inode_padding = subparsers.add_parser('inode_padding', help='hide data in padding fields of inodes')
inode_padding.set_defaults(which='inode_padding')
inode_padding.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
inode_padding.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from padding fields to stdout')
inode_padding.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from padding fields to OUTFILE')
inode_padding.add_argument('-w', '--write', dest='write', action='store_true', help='write to padding fields')
inode_padding.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear padding fields')
inode_padding.add_argument('file', metavar='FILE', nargs='?', help="File to write into padding fields, if nothing provided, use stdin")
# write gen
write_gen = subparsers.add_parser('write_gen', help='hide data in write_gen fields of inodes')
write_gen.set_defaults(which='write_gen')
write_gen.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
write_gen.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from write_gen fields to stdout')
write_gen.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from write_gen fields to OUTFILE')
write_gen.add_argument('-w', '--write', dest='write', action='store_true', help='write to write_gen fields')
write_gen.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear write_gen fields')
write_gen.add_argument('file', metavar='FILE', nargs='?', help="File to write into write_gen fields, if nothing provided, use stdin")
# timestamp hiding
timestamp = subparsers.add_parser('timestamp_hiding', help='hide data in inode timestamps')
timestamp.set_defaults(which='timestamp_hiding')
timestamp.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
timestamp.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from timestamps to stdout')
timestamp.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from timestamps to OUTFILE')
timestamp.add_argument('-w', '--write', dest='write', action='store_true', help='write to timestamps')
timestamp.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear timestamps')
timestamp.add_argument('file', metavar='FILE', nargs='?', help="File to write into timestamps, if nothing provided, use stdin")
# xfield padding
xfield = subparsers.add_parser('xfield_padding', help='hide data in inode extended fields')
xfield.set_defaults(which='xfield_padding')
xfield.add_argument('-m', '--metadata', dest='metadata', required=True, help='Metadata file to use')
xfield.add_argument('-r', '--read', dest='read', action='store_true', help='read hidden data from extended fields to stdout')
xfield.add_argument('-o', '--outfile', dest='outfile', metavar='OUTFILE', help='read hidden data from extended fields to OUTFILE')
xfield.add_argument('-w', '--write', dest='write', action='store_true', help='write to extended fields')
xfield.add_argument('-c', '--clear', dest='clear', action='store_true', help='clear extended fields')
xfield.add_argument('file', metavar='FILE', nargs='?', help="File to write into extended fields, if nothing provided, use stdin")
return parser
def main():
# set exception handler
sys.excepthook = general_excepthook
# Parse cli arguments
parser = build_parser()
args = parser.parse_args()
# Set logging level (verbosity)
if args.verbose is None: args.verbose = 0
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose >= 2:
logging.basicConfig(level=logging.DEBUG)
if args.verbose > 2:
fish = """
.|_-
___.-´ /_.
.--´` `´`-,/ .
..--.-´-. ´-. /|
(o( o( o ) ./.
` ´ -
( `. /
-....-- .\ \--..- \\
`--´ -.-´ \.-
\|
"""
LOGGER.debug(fish)
LOGGER.debug("Thank you for debugging so hard! We know it is "
"a mess. So, here is a friend, who will support you :)")
# if 'metadata' was chosen
if args.which == 'no_arguments':
parser.print_help()
elif args.which == 'metadata':
do_metadata(args)
else:
with open(args.dev, 'rb+') as device:
# if 'fattools' was chosen
if args.which == "fattools":
do_fattools(args, device)
# if 'fileslack' was chosen
if args.which == 'fileslack':
do_fileslack(args, device)
# if 'mftslack' was chosen
if args.which == 'mftslack':
do_mftslack(args, device)
# if 'addcluster' was chosen
if args.which == 'addcluster':
do_addcluster(args, device)
# if 'badcluster' was chosen
if args.which == 'badcluster':
do_badcluster(args, device)
# if 'reserved_gdt_blocks' was chosen
if args.which == 'reserved_gdt_blocks':
do_reserved_gdt_blocks(args, device)
# if 'osd2' was chosen
if args.which == "osd2":
do_osd2(args, device)
# if 'obso_faddr' was chosen
if args.which == "obso_faddr":
do_obso_faddr(args, device)
# if 'inode_padding' was chosen
if args.which == "inode_padding":
do_inode_padding(args, device)
# if 'timestamp_hiding' was chosen
if args.which == "timestamp_hiding":
do_timestamp_hiding(args, device)
# if 'xfield_padding' was chosen
if args.which == "xfield_padding":
do_xfield_padding(args, device)
# if 'write_gen' was chosen
if args.which == "write_gen":
do_write_gen(args, device)
# if 'superblock_slack' was chosen
if args.which == 'superblock_slack':
do_superblock_slack(args,device)
def general_excepthook(errtype, value, tb):
"""
    This function serves as a general exception handler that catches all
    exceptions that were not handled at a higher level
"""
LOGGER.critical("Error: %s: %s.", errtype, value)
LOGGER.info("".join(traceback.format_exception(type, value, tb)))
sys.exit(1)
if __name__ == "__main__":
main() | [
"[email protected]"
]
| |
dae9dc485e3fb180f377368fb642b0eeeb1004c6 | 1640189b5bf78114e2749a8ed1216e099bae9814 | /src/xmlsec/rsa_x509_pem/pyasn1/debug.py | 5aa42ced36ef65aadacddb629cebd74977b9d1a4 | [
"BSD-2-Clause",
"BSD-3-Clause"
]
| permissive | hfalcic/pyXMLSecurity | fb69cce12c1b417928d85b91a4c3dc87f46935ec | b29a68e6d21a0485b9190be45d532b9042fdc918 | refs/heads/master | 2020-04-03T13:19:13.016532 | 2014-07-08T17:57:55 | 2014-07-08T17:57:55 | 21,471,398 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | import sys
from .compat.octets import octs2ints
from . import error
from . import __version__
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
flagMap = {
'encoder': flagEncoder,
'decoder': flagDecoder,
'all': flagAll
}
class Debug:
defaultPrinter = sys.stderr.write
def __init__(self, *flags):
self._flags = flagNone
self._printer = self.defaultPrinter
self('running pyasn1 version %s' % __version__)
for f in flags:
if f not in flagMap:
raise error.PyAsn1Error('bad debug flag %s' % (f,))
self._flags = self._flags | flagMap[f]
self('debug category \'%s\' enabled' % f)
def __str__(self):
return 'logger %s, flags %x' % (self._printer, self._flags)
def __call__(self, msg):
self._printer('DBG: %s\n' % msg)
def __and__(self, flag):
return self._flags & flag
def __rand__(self, flag):
return flag & self._flags
logger = 0
def setLogger(l):
global logger
logger = l
def hexdump(octets):
return ' '.join(
[ '%s%.2X' % (n%16 == 0 and ('\n%.5d: ' % n) or '', x)
for n,x in zip(range(len(octets)), octs2ints(octets)) ]
)
class Scope:
def __init__(self):
self._list = []
def __str__(self): return '.'.join(self._list)
def push(self, token):
self._list.append(token)
def pop(self):
return self._list.pop()
scope = Scope()
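# Typical usage in the upstream pyasn1 package (shown only as a sketch; this
# vendored copy lives under a different import path):
#   from pyasn1 import debug
#   debug.setLogger(debug.Debug('decoder'))   # or 'encoder' / 'all'
#   ...
#   debug.setLogger(0)                        # switch tracing off again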
| [
"[email protected]"
]
| |
c9cbfca3f4c84cb5e219730e43194e7238cda653 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/358/usersdata/296/102792/submittedfiles/estatistica.py | 79ada402f0ec9ec03a71780b75717f4fa32662f5 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | # -*- coding: utf-8 -*-
def media(lista):
    # arithmetic mean of the values in lista
    soma = 0
    for i in range(0, len(lista), 1):
        soma = soma + lista[i]
    resultado = soma / len(lista)
    return resultado
def desvio_padrao(lista):
    # sample standard deviation, built on media() above
    somatorio = 0
    for i in range(0, len(lista), 1):
        somatorio = ((media(lista) - lista[i]) ** 2) + somatorio
    desvio = (somatorio / (len(lista) - 1)) ** 0.5
    return desvio
m = int(input("Digite o número da lista: "))
n = int(input("Digite o número de elementos de cada lista: "))
matriz=[]
for i in range (0,m,1):
matriz_linha=[]
for j in range (0,n,1):
matriz_linha.append(int(input("Digite o elemento (%d,%d): "%(i+1,j+1))))
matriz.append(matriz_linha)
for i in range (0,m,1):
print(media(matriz[i]))
print("%.2f"%(desvio_padrao(matriz[i])))
# Based on the function above, write the function that computes the standard deviation of a list
# Finally, write the main program, which asks for the input and calls the functions created. | [
"[email protected]"
]
| |
9cab9dc37c46f3af6d44d688dd5c03fcf4425162 | 10c459a49cbc8ee2dc3bc2a8353c48b5a96f0c1d | /AI/nai_bayes.py | 131804de264e30fc0df16076a4ac00543533cbaf | []
| no_license | alinzel/Demo | 1a5d0e4596ab4c91d7b580da694b852495c4ddcc | cc22bbcdbd77190014e9c26e963abd7a9f4f0829 | refs/heads/master | 2020-03-10T22:26:30.247695 | 2018-04-15T15:37:28 | 2018-04-15T15:37:28 | 129,619,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,994 | py | import numpy as np
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from time import time
from pprint import pprint
import matplotlib.pyplot as plt
import matplotlib as mpl
def make_test(classfier):
print('分类器:', classfier)
alpha_can = np.logspace(-3, 2, 10)
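    # ten candidate smoothing strengths, log-spaced between 1e-3 and 1e2;
    # GridSearchCV below selects the best alpha by 5-fold cross-validation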
model = GridSearchCV(classfier, param_grid={'alpha': alpha_can}, cv=5)
model.set_params(param_grid={'alpha': alpha_can})
t_start = time()
model.fit(x_train, y_train)
t_end = time()
t_train = (t_end - t_start) / (5 * alpha_can.size)
print('5折交叉验证的训练时间为:%.3f秒/(5*%d)=%.3f秒' % ((t_end - t_start), alpha_can.size, t_train))
print('最优超参数为:', model.best_params_)
t_start = time()
y_hat = model.predict(x_test)
t_end = time()
t_test = t_end - t_start
print('测试时间:%.3f秒' % t_test)
acc = metrics.accuracy_score(y_test, y_hat)
print('测试集准确率:%.2f%%' % (100 * acc))
name = str(classfier).split('(')[0]
index = name.find('Classifier')
if index != -1:
name = name[:index] # 去掉末尾的Classifier
return t_train, t_test, 1 - acc, name
if __name__ == "__main__":
remove = ('headers', 'footers', 'quotes')
    categories = 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space'  # pick four newsgroup categories to classify
    # download the 20 newsgroups training and test sets
data_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=0, remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=0, remove=remove)
print('训练集包含的文本数目:', len(data_train.data))
print('测试集包含的文本数目:', len(data_test.data))
print('训练集和测试集使用的%d个类别的名称:' % len(categories))
categories = data_train.target_names
pprint(categories)
y_train = data_train.target
y_test = data_test.target
print(' -- 前10个文本 -- ')
for i in np.arange(10):
print('文本%d(属于类别 - %s):' % (i + 1, categories[y_train[i]]))
print(data_train.data[i])
print('\n\n')
    # TF-IDF feature extraction
vectorizer = TfidfVectorizer(input='content', stop_words='english', max_df=0.5, sublinear_tf=True)
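    # max_df=0.5 drops terms that appear in more than half of the documents,
    # and sublinear_tf=True uses 1 + log(tf) instead of the raw term frequency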
x_train = vectorizer.fit_transform(data_train.data)
x_test = vectorizer.transform(data_test.data)
print('训练集样本个数:%d,特征个数:%d' % x_train.shape)
print('停止词:\n', end=' ')
#pprint(vectorizer.get_stop_words())
feature_names = np.asarray(vectorizer.get_feature_names())
    # compare the classifiers' results
clfs = (MultinomialNB(), # 0.87(0.017), 0.002, 90.39%
BernoulliNB(), # 1.592(0.032), 0.010, 88.54%
)
result = []
for clf in clfs:
r = make_test(clf)
result.append(r)
print('\n')
result = np.array(result)
time_train, time_test, err, names = result.T
time_train = time_train.astype(np.float)
time_test = time_test.astype(np.float)
err = err.astype(np.float)
x = np.arange(len(time_train))
mpl.rcParams['font.sans-serif'] = ['simHei']
mpl.rcParams['axes.unicode_minus'] = False
plt.figure(figsize=(10, 7), facecolor='w')
ax = plt.axes()
b1 = ax.bar(x, err, width=0.25, color='#77E0A0')
ax_t = ax.twinx()
b2 = ax_t.bar(x + 0.25, time_train, width=0.25, color='#FFA0A0')
b3 = ax_t.bar(x + 0.5, time_test, width=0.25, color='#FF8080')
plt.xticks(x + 0.5, names)
plt.legend([b1[0], b2[0], b3[0]], ('错误率', '训练时间', '测试时间'), loc='upper left', shadow=True)
plt.title('新闻组文本数据不同分类器间的比较', fontsize=18)
plt.xlabel('分类器名称')
plt.grid(True)
plt.tight_layout(2)
plt.show()
| [
"[email protected]"
]
| |
080a6e7e8f5e0c897c92846f23df703ff1cf81f0 | a8750439f200e4efc11715df797489f30e9828c6 | /LeetCodeContests/87/845_longest_mountain.py | 141d5018c884992b88f6afeddcd2fd5ae122f0db | []
| no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | '''
Let's call any (contiguous) subarray B (of A) a mountain if the following properties hold:
B.length >= 3
There exists some 0 < i < B.length - 1 such that B[0] < B[1] < ... B[i-1] < B[i] > B[i+1] > ... > B[B.length - 1]
(Note that B could be any subarray of A, including the entire array A.)
Given an array A of integers, return the length of the longest mountain.
Return 0 if there is no mountain.
Example 1:
Input: [2,1,4,7,3,2,5]
Output: 5
Explanation: The largest mountain is [1,4,7,3,2] which has length 5.
Example 2:
Input: [2,2,2]
Output: 0
Explanation: There is no mountain.
Note:
0 <= A.length <= 10000
0 <= A[i] <= 10000
'''
class Solution:
def longestMountain(self, A):
"""
:type A: List[int]
:rtype: int
"""
size = len(A)
ans = 0
for i in range(1, size-1):
if A[i] > A[i-1] and A[i] > A[i+1]:
l = i - 1
r = i + 1
while l > 0 and A[l] > A[l-1]: l-= 1
while r < size-1 and A[r] > A[r+1]: r +=1
ans = max(ans, r - l + 1)
return ans
sol = Solution()
print(sol.longestMountain([0,1,2,3,4,5,4,3,2,1,0]))
| [
"[email protected]"
]
| |
681440009127bf5750638d9a87a4155477b2fda3 | 43b34ed0be64771f236c086f716bc6a92ae3db32 | /kt_ph_n.py | 8073dcefb011746731594e8d83f99505915ad414 | []
| no_license | elonca/LWB-benchmark-generator | ad5b6dc5e591941184056476db3ad13f01900879 | 7a7f28800f7574c98a3883f6edccad727dd509bc | refs/heads/main | 2023-07-28T01:42:22.532324 | 2021-09-16T07:22:56 | 2021-09-16T07:22:56 | 407,061,367 | 0 | 0 | null | 2021-09-16T07:12:38 | 2021-09-16T07:12:37 | null | UTF-8 | Python | false | false | 135 | py | from k_ph_n import *
def kt_ph_n(n):
return str(kt_ph_n_f(n))[1:-1]
def kt_ph_n_f(n):
return left(n) |IMPLIES| Dia(right(n))
| [
"[email protected]"
]
| |
16b25ff5a3aed45017c060619a98e4dfce6a60d6 | cbda89443b351bb2047180dad4e300c13dc3df7f | /Crystals/Morpurgo_sp_outer/Jobs/Pc/Pc_neut_neut_inner1_outer4/Pc_neut_neut_inner1_outer4.py | 416385fcff412563b8446a40888f5125d9823323 | []
| no_license | sheridanfew/pythonpolarisation | 080f52979f98d26360a46412a10c8e3f51ee4549 | 178e2684e9a239a8e60af5f7b1eb414ac5f31e92 | refs/heads/master | 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null | UTF-8 | Python | false | false | 5,279 | py | import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
print strftime("%a, %d %b %Y %X +0000", gmtime())
name='Pc_neut_neut_inner1_outer4'
#For crystals here, all cubic and centred at centre
insize=1
#number of TVs in each dir central mol is from edge of inner region
outsize=4
mols_cen=['Pc_mola_neut_aniso_cifstruct_chelpg.xyz','Pc_molb_neut_aniso_cifstruct_chelpg.xyz']
mols_sur=['Pc_mola_neut_aniso_cifstruct_chelpg.xyz','Pc_molb_neut_aniso_cifstruct_chelpg.xyz']
mols_outer=['sp_Pc_mola_neut.xyz','sp_Pc_molb_neut.xyz']
#From cif:
'''
Pc
_cell_length_a 7.900
_cell_length_b 6.060
_cell_length_c 16.010
_cell_angle_alpha 101.90
_cell_angle_beta 112.60
_cell_angle_gamma 85.80
_cell_volume 692.384
'''
#Get translation vectors:
a=7.900/0.5291772109217
b=6.060/0.5291772109217
c=16.010/0.5291772109217
alpha=101.90*(pi/180)
beta=112.60*(pi/180)
gamma=90*(pi/180)
cif_unit_cell_volume=692.384/(a*b*c*(0.5291772109217**3))
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
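# Illustrative note (not used below): a point with fractional coordinates
# (fa, fb, fc) converts to cartesian coordinates (in bohr) via
#   matrix_to_cartesian * np.matrix([[fa], [fb], [fc]])
# so the fractional point (1, 0, 0) maps onto the first translation vector TV[0].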
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
prot_neut_cry().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
jm = JMatrix(cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
dstr=str(dd)
f.write(dstr)
f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
print 'Job Completed Successfully.'
| [
"[email protected]"
]
| |
5fbfc72f14ae2926b33488a6f8779cdf247fa0b7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_397/ch12_2020_03_04_11_23_47_173814.py | 6f4b82bd91b75fe7a2fadd6fbd05585a048320bf | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | def resolve_equacao_1o_grau (a, b):
    X = (0 - b) / a
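    # Hypothetical example: resolve_equacao_1o_grau(2, -4) returns 2.0,
    # the root of 2*x - 4 = 0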
return X | [
"[email protected]"
]
| |
894482ee3334014d91285e7f29af8f4772c1e0bf | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/tools/cr/main.py | dced8cd4069ceea9d47ee5b9b17ca6fc164b8c81 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown"
]
| permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 3,092 | py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium cr tool main module.
Holds the main function and all its support code.
"""
import os
import sys
import cr
import cr.auto.user
import cr.autocomplete
import cr.loader
_CONTACT = '[email protected]'
def Main():
"""Chromium cr tool main function.
  This is the main entry point of the cr tool; it finds and loads all the
plugins, creates the context and then activates and runs the specified
command.
"""
# Add the users plugin dir to the cr.auto.user package scan
user_path = os.path.expanduser(os.path.join('~', '.config', 'cr'))
cr.auto.user.__path__.append(user_path)
cr.loader.Scan()
# Build the command context
context = cr.Context(
description='The chrome dev build tool.',
epilog='Contact ' + _CONTACT + ' if you have issues with this tool.',
)
# Install the sub-commands
for command in cr.Command.Plugins():
context.AddSubParser(command)
# test for the special autocomplete command
if context.autocompleting:
# After plugins are loaded so pylint: disable=g-import-not-at-top
cr.autocomplete.Complete(context)
return
# Speculative argument processing to add config specific args
context.ParseArgs(True)
cr.plugin.Activate(context)
# At this point we should know what command we are going to use
command = cr.Command.GetActivePlugin(context)
# Do some early processing, in case it changes the build dir
if command:
command.EarlyArgProcessing(context)
# Update the activated set again, in case the early processing changed it
cr.plugin.Activate(context)
# Load the build specific configuration
found_build_dir = cr.base.client.LoadConfig(context)
# Final processing or arguments
context.ParseArgs()
cr.plugin.Activate(context)
# If we did not get a command before, it might have been fixed.
if command is None:
command = cr.Command.GetActivePlugin(context)
# If the verbosity level is 3 or greater, then print the environment here
if context.verbose >= 3:
context.DumpValues(context.verbose > 3)
if command is None:
print context.Substitute('No command specified.')
exit(1)
if command.requires_build_dir:
if not found_build_dir:
if not context.Find('CR_OUT_FULL'):
print context.Substitute(
'No build directory specified. Please use cr init to make one.')
else:
print context.Substitute(
'Build {CR_BUILD_DIR} not a valid build directory')
exit(1)
if context.Find('CR_VERSION') != cr.base.client.VERSION:
print context.Substitute(
'Build {CR_BUILD_DIR} is for the wrong version of cr')
print 'Please run cr init to reset it'
exit(1)
cr.Platform.Prepare(context)
if context.verbose >= 1:
print context.Substitute(
'Running cr ' + command.name + ' for {CR_BUILD_DIR}')
# Invoke the given command
command.Run(context)
if __name__ == '__main__':
sys.exit(Main())
| [
"[email protected]"
]
|