| column | type | min .. max / values |
|---|---|---|
| blob_id | stringlengths | 40 .. 40 |
| directory_id | stringlengths | 40 .. 40 |
| path | stringlengths | 3 .. 616 |
| content_id | stringlengths | 40 .. 40 |
| detected_licenses | sequencelengths | 0 .. 112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5 .. 115 |
| snapshot_id | stringlengths | 40 .. 40 |
| revision_id | stringlengths | 40 .. 40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 .. 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 .. 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 .. 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k .. 681M ⌀ |
| star_events_count | int64 | 0 .. 209k |
| fork_events_count | int64 | 0 .. 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 .. 2023-09-14 21:59:50 ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 .. 2023-08-21 12:35:19 ⌀ |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 .. 10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3 .. 10.2M |
| authors | sequencelengths | 1 .. 1 |
| author_id | stringlengths | 1 .. 132 |

(⌀ marks columns that contain null values.)
41ce20e169fcbe87426ff7cea186435b73c50ae0 | 9a05e1e8c950b091124d805ea70f24d2837b827c | /daydayup/cema_python/oneday/1_2change1.py | 550314a509fd5c9dc9babee41a9aed8f25204e44 | [] | no_license | fanzongpeng/mywork | 20676a9fe0e0599461a756ad194e4bd35aad4668 | aa6d044bbab3c0288de48888b2cc7dbd7785c91b | refs/heads/master | 2022-05-31T06:03:26.826914 | 2020-04-30T09:50:22 | 2020-04-30T09:50:22 | 257,189,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | my_str = "I love Python"
my_list = ["python", "java", "lanuage", "age"]
my_list2 = [24, 12, 2.3, 9.7]
my_tuple = ("python", 33, "java", 8.8)
my_dict = {"name": "linda", "age": 88}
my_list1 = ['a', 'a', 1, 1, 2, 3]
my_set = {1, 2, 3}
a = 10
print(type(a))
# cast int to str
a1 = str(a)
print(type(a1))
# str to int
print(type(int(a1)))
# conversion between list and tuple
print(tuple(my_list))
print(list(my_tuple))
# converting a list to a set drops duplicate elements
print(set(my_list1))
# converting a dict to a set keeps only the keys
print(set(my_dict))
# dict to list; keys and values can each be converted separately
print(list(my_dict.values()))
print(list(my_dict))
# my_tuple1 = ('one', 1), ('two', 2), ('three', 3)
# my_list_tuple = [('one', 1), ('two', 2), ('three', 3)]
# # print(my_tuple1)
# print(type(my_list_tuple))
# print(dict(my_list_tuple))
| [
"18210023228.com"
] | 18210023228.com |
7736b0d581aa5a48107b2970929e39106a017b0b | 7ec92031e28b1a92a10a9f252f99211663e0d8f9 | /src/py/l0404.py | f10c6fd9600478f89fa456420ae59688baa37fea | [] | no_license | SS4G/leetcode_2020 | 4eb63f6afd59f84e44334e78cb06c7b33a89dd15 | 9a9a8fc779e7456db77f88e7dcdcc1f5cae92c62 | refs/heads/master | 2020-06-29T17:12:39.488350 | 2020-02-08T01:07:08 | 2020-02-08T01:07:08 | 200,575,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | class Solution(object):
def isLeaf(self, root):
if root is None:
return False
return root.left is None and root.right is None
def helper(self, root, leftSum):
if root is None or self.isLeaf(root):
return
if self.isLeaf(root.left):
leftSum[0] += root.left.val
self.helper(root.left, leftSum)
self.helper(root.right, leftSum)
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
leftSum = [0,]
self.helper(root, leftSum)
return leftSum[0]
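
# Hypothetical local harness: LeetCode normally supplies TreeNode, so a
# minimal stand-in is defined here only to let the solution run directly.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

if __name__ == "__main__":
    # tree [3, 9, 20, null, null, 15, 7]; its left leaves are 9 and 15
    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().sumOfLeftLeaves(root))  # -> 24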
| [
"[email protected]"
] | |
e3e74dbde5aa2e2be661c09d14954b7a98652ccb | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_309/ch95_2019_11_27_18_50_15_020676.py | 2000a50c89ba9a929a27109ee30d898bc4254ee3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | def mais_populoso(dic):
    # Repaired sketch: the original iterated over undefined names (v, maispop).
    # Assuming dic maps a name to a sequence of population counts:
    mais_pop = None
    maior_soma = None
    for k, v in dic.items():
        soma = sum(v)
        if maior_soma is None or soma > maior_soma:
            maior_soma = soma
            mais_pop = k
    return mais_pop
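# Usage sketch (hypothetical data):
#   mais_populoso({"A": [10, 20], "B": [5, 50]})  # -> "B"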
| [
"[email protected]"
] | |
8fbf8a3b8b70759cf3a8cf7354914b9a62690737 | bdc7a4a56a1bfba53e897fc832e428f4600cd896 | /scripts/standoffstats.py | 39557be95f4f2bfa5d577a5d554df6b8a99cc8ac | [
"MIT"
] | permissive | spyysalo/consensus-pipeline | 4533fe92ec225c11fbfa6bce3b8ad6510b234b7c | 24cb58f2a28a49c22cd6847734228485f045b8cc | refs/heads/master | 2023-03-11T06:01:54.352460 | 2023-02-24T11:38:24 | 2023-02-24T11:38:24 | 179,457,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,080 | py | #!/usr/bin/env python3
import sys
import os
from collections import defaultdict, OrderedDict, Counter
from logging import error, info, warning
from standoff import Textbound, Normalization
try:
import sqlitedict
except ImportError:
error('failed to import sqlitedict, try `pip3 install sqlitedict`')
raise
# Normalization DB/ontology prefixes
TAXONOMY_PREFIX = 'NCBITaxon:'
# NCBI Taxonomy dump files
TAXONOMY_NODES = 'nodes.dmp'
TAXONOMY_DIVISION = 'division.dmp'
TAXONOMY_MERGED = 'merged.dmp'
# Keys for stats dict
ENTITY_TYPE = 'entity-type'
ENTITY_TEXT = 'text-overall'
TEXT_BY_TYPE = 'text ({})'
FRAGMENTED_SPAN = 'fragmented'
SAME_SPAN = 'same-span'
SAME_SPAN_TEXT = 'same-span-text'
CONTAINMENT = 'containment'
CONTAINMENT_TEXT = 'containment-text'
CROSSING_SPAN = 'crossing-span'
CROSSING_SPAN_TEXT = 'crossing-span-text'
TAXONOMY_RANK = 'taxonomy-rank'
TAXONOMY_DIV = 'taxonomy-division'
TAXONOMY_RANK_DIV = 'taxonomy-rank/division'
TAXONOMY_UNKNOWN = 'unknown-taxid'
TEXT_BY_RANK = 'rank ({})'
CONSISTENCY = 'document-consistency'
TOTALS = 'TOTAL'
# Order in which to show stats
STATS_ORDER = [
CROSSING_SPAN,
CROSSING_SPAN_TEXT,
SAME_SPAN,
SAME_SPAN_TEXT,
CONTAINMENT,
CONTAINMENT_TEXT,
TEXT_BY_TYPE,
FRAGMENTED_SPAN,
ENTITY_TEXT,
ENTITY_TYPE,
TAXONOMY_RANK,
TAXONOMY_DIV,
TAXONOMY_RANK_DIV,
TAXONOMY_UNKNOWN,
TEXT_BY_RANK,
CONSISTENCY,
TOTALS,
]
def argparser():
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('-l', '--limit', metavar='INT', type=int,
help='maximum number of documents to process')
ap.add_argument('-s', '--suffix', default='.ann',
help='annotation suffix')
ap.add_argument('-t', '--show-top', metavar='N', type=int, default=10,
help='show top N most frequent')
ap.add_argument('-T', '--taxdata', metavar='DIR', default=None,
help='NCBI taxonomy data directory')
ap.add_argument('data', nargs='+', metavar='DB')
return ap
def is_sqlite_db(path):
# TODO better identification
return os.path.splitext(os.path.basename(path))[1] == '.sqlite'
def find_overlapping(textbounds):
# Avoiding O(n^2) comparisons: create list of (offset, start/end, tb),
# sort with end<start, and then iterate over the list while maintaining
# a list of currently open.
    START, END = 's', 'e'  # need END < START for the sort to work right
boundaries = []
for t in textbounds:
if t.end <= t.start:
# TODO: zero-widths require special treatment: as ends sort
# before starts, the basic algorithm doesn't work if end==start.
warning('find_overlapping: ignoring zero-width textbound: {}'.\
format(t))
continue
boundaries.append((t.start, START, t))
boundaries.append((t.end, END, t))
boundaries.sort()
overlapping = []
open_textbounds = OrderedDict()
for offset, boundary, textbound in boundaries:
if boundary == START: # overlaps with everything currently open
for t in open_textbounds.values():
overlapping.append((t, textbound))
assert textbound.id not in open_textbounds, 'duplicate id'
open_textbounds[textbound.id] = textbound
else:
assert boundary == END
del open_textbounds[textbound.id]
return overlapping
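# Sketch of the sweep above (hypothetical spans): with T1 covering 0..10 and
# T2 covering 5..15, the boundaries sort to (0,s,T1) (5,s,T2) (10,e,T1)
# (15,e,T2); T2's START fires while T1 is still open, so the pair (T1, T2)
# is reported exactly once.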
def take_stats(txt, ann, fn, stats, options):
annotations = []
for ln, line in enumerate(ann.splitlines(), start=1):
if not line or line.isspace() or line[0] not in 'TN':
            info('skipping line {} in {}: {}'.format(ln, fn, line))
            continue  # without this, blank or unrecognized lines fell through and crashed below
if line[0] == 'T':
id_, type_span, text = line.split('\t')
type_, span = type_span.split(' ', 1)
stats[ENTITY_TYPE][type_] += 1
stats[ENTITY_TEXT][text] += 1
stats[TEXT_BY_TYPE.format(type_)][text] += 1
stats[TOTALS]['textbounds'] += 1
if len(span.split(';')) > 1:
stats[FRAGMENTED_SPAN][type_] += 1
annotations.append(Textbound(id_, type_, span, text))
elif line[0] == 'N':
id_, type_rid_tid, text = line.split('\t')
type_, rid, tid = type_rid_tid.split(' ')
if (tid.startswith(TAXONOMY_PREFIX) and
options.taxdata is not None):
tax_id = tid[len(TAXONOMY_PREFIX):]
rank = options.taxdata.get_rank(tax_id)
if rank == '<UNKNOWN>':
stats[TAXONOMY_UNKNOWN][tax_id] += 1
                division = options.taxdata.get_division(tax_id)
stats[TAXONOMY_RANK][rank] += 1
stats[TAXONOMY_DIV][division] += 1
stats[TAXONOMY_RANK_DIV]['/'.join([rank, division])] += 1
stats[TEXT_BY_RANK.format(rank)][text] += 1
stats[TOTALS]['normalizations'] += 1
else:
assert False, 'internal error'
stats[TOTALS]['documents'] += 1
is_consistent = True
overlapping = find_overlapping(annotations)
for t1, t2 in overlapping:
sorted_types = '{}-{}'.format(*sorted([t1.type, t2.type]))
if t1.span_matches(t2):
if t1.type == t2.type:
# same span, different types
is_consistent = False
stats[SAME_SPAN][sorted_types] += 1
stats[SAME_SPAN_TEXT][t1.text] += 1
elif t1.contains(t2):
stats[CONTAINMENT]['{} in {}'.format(t2.type, t1.type)] += 1
stats[CONTAINMENT_TEXT]['{} in {}'.format(t2.text, t1.text)] += 1
elif t2.contains(t1):
stats[CONTAINMENT]['{} in {}'.format(t1.type, t2.type)] += 1
stats[CONTAINMENT_TEXT]['{} in {}'.format(t1.text, t2.text)] += 1
elif t1.span_crosses(t2):
is_consistent = False
stats[CROSSING_SPAN]['{}/{}'.format(t1.type, t2.type)] += 1
stats[CROSSING_SPAN_TEXT]['{}/{}'.format(t1.text, t2.text)] += 1
else:
assert False, 'internal error'
if is_consistent:
stats[CONSISTENCY]['consistent'] += 1
else:
stats[CONSISTENCY]['inconsistent'] += 1
def process_db(path, stats, options):
# No context manager: close() can block and this is read-only
db = sqlitedict.SqliteDict(path, flag='r', autocommit=False)
count = 0
for key, val in db.items():
root, ext = os.path.splitext(key)
if ext != options.suffix:
continue
# txt_key = '{}.txt'.format(root)
# txt = db[txt_key] # everything hangs if I do this
take_stats('', val, key, stats, options)
count += 1
if options.limit is not None and count >= options.limit:
break
print('Done, processed {}.'.format(count), file=sys.stderr)
return count
def process(path, options):
stats = defaultdict(Counter)
if is_sqlite_db(path):
count = process_db(path, stats, options)
else:
raise NotImplementedError('filesystem input ({})'.format(path))
return stats
def report_stats(stats, options, out=sys.stdout):
categories = list(set(STATS_ORDER + list(stats.keys())))
rank = dict((c.split(' ')[0], i) for i, c in enumerate(STATS_ORDER))
categories = sorted(categories, key=lambda k: (rank[k.split(' ')[0]], k))
for category in categories:
        if '{}' in category and category not in stats:  # was `counts`, unbound on the first iteration
continue
counts = stats[category]
print('--- {} ---'.format(category), file=out)
for key, count in counts.most_common(options.show_top):
print(count, key, file=out)
extra = len(counts)-options.show_top
if extra > 0:
print('[and {} more]'.format(extra), file=out)
class TaxonomyData(object):
def __init__(self, rank_by_id, div_by_id, new_id):
self.rank_by_id = rank_by_id
self.div_by_id = div_by_id
self.new_id = new_id
def get_rank(self, tax_id):
if tax_id not in self.rank_by_id and tax_id in self.new_id:
tax_id = self.new_id[tax_id] # old id, use merged
return self.rank_by_id.get(tax_id, '<UNKNOWN>')
def get_division(self, tax_id):
if tax_id not in self.div_by_id and tax_id in self.new_id:
tax_id = self.new_id[tax_id] # old id, use merged
return self.div_by_id.get(tax_id, '<UNKNOWN>')
@classmethod
def from_directory(cls, path):
# Load NCBI taxonomy data from given directory
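        # The .dmp files delimit fields with "\t|\t" and terminate lines with
        # "\t|", so split('\t') interleaves fields and '|' separators; the
        # [::2] slices below keep only the fields.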
div_name_by_id = {}
with open(os.path.join(path, TAXONOMY_DIVISION)) as f:
for ln, l in enumerate(f, start=1):
l = l.rstrip('\n')
fields = l.split('\t')[::2] # skip separators
div_id, div_code, div_name = fields[:3]
div_name_by_id[div_id] = div_name
rank_by_id = {}
div_by_id = {}
with open(os.path.join(path, TAXONOMY_NODES)) as f:
for ln, l in enumerate(f, start=1):
l = l.rstrip('\n')
fields = l.split('\t')[::2] # skip separators
tax_id, parent_id, rank, embl_code, div_id = fields[:5]
rank_by_id[tax_id] = rank
div_by_id[tax_id] = div_name_by_id[div_id]
new_id_by_old_id = {}
with open(os.path.join(path, TAXONOMY_MERGED)) as f:
for ln, l in enumerate(f, start=1):
l = l.rstrip('\n')
fields = l.split('\t')[::2] # skip separators
old_id, new_id = fields[:2]
new_id_by_old_id[old_id] = new_id
return cls(rank_by_id, div_by_id, new_id_by_old_id)
def main(argv):
args = argparser().parse_args(argv[1:])
if args.taxdata is not None:
args.taxdata = TaxonomyData.from_directory(args.taxdata)
for d in args.data:
stats = process(d, args)
report_stats(stats, args)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
"[email protected]"
] | |
f80268864677608dbee3b2b7ecd1bb4ba1dc5af0 | 5bce1118b13289308d23510f323c79aa972ddc27 | /src/modules/darknight/darknightHelper.py | 603a5835a327eb7035ca5a911afd98adc4c90f14 | [] | no_license | anupsl/pyApps | 62b64b90723de32684bbabee402220317a4fe817 | 2651d502c366b87449a0c977a9876cc32521c57c | refs/heads/master | 2022-07-03T05:49:12.828630 | 2020-05-10T17:25:26 | 2020-05-10T17:25:26 | 255,157,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,315 | py | import traceback,random, pymongo
from datetime import datetime
from src.utilities.logger import Logger
from src.Constant.constant import constant
from src.modules.darknight.darknightThrift import DarknightThrift
from src.modules.darknight.darknightObject import DarknightObject
from src.utilities.utils import Utils
from src.utilities.dbhelper import dbHelper
from src.utilities.mongoHelper import MongoHelper
class DarknightHelper():
@staticmethod
def checkDarknightConn(ignoreConnectionError=False):
Utils.checkServerConnection('DARK_KNIGHT_THRIFT_SERVICE', DarknightThrift, 'darknightPort', ignoreConnectionError)
@staticmethod
def getConnObj(newConnection=False):
port = constant.config['darknightPort']
connPort = str(port) + '_obj'
if connPort in constant.config:
if newConnection:
constant.config[connPort].close()
constant.config[connPort] = DarknightThrift(port)
return constant.config[connPort]
else:
return DarknightThrift(port)
@staticmethod
def getEmailStatus(email):
query = 'select status from email_status where email = "'+email+'"'
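        # note: interpolating `email` into the SQL string risks injection; a
        # parameterized query would be safer if dbHelper supports one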
result = dbHelper.queryDB(query, "darknight")
if result:
return result[0][0]
else:
return 0
@staticmethod
def generateSmsWhitelistingData(tmpValue, mobile = '918660430751'):
if constant.config['cluster'] in ['nightly', 'staging']:
for i in range(0, 3):
try:
testCol = constant.config['mongoConn']
value = {
"mobile": mobile,
"delivered": 0,
"not_delivered": 0
}
value.update(tmpValue)
value['total'] = value['delivered'] + value['not_delivered']
batchReq = []
batchReq.append(pymongo.ReplaceOne({'mobile': mobile}, value, upsert=True))
testCol.bulk_write(batchReq)
Logger.log(testCol.find({'mobile' : mobile})[0])
return
except pymongo.errors.ConnectionFailure as e:
Logger.log(e)
port = constant.config['INTOUCH_DB_MONGO_MASTER']
if Utils.restartTunnel(port):
DarknightHelper.getMongoConnection('whitelisting', 'mobile_status')
else:
break
except Exception as e:
break
raise Exception(e)
@staticmethod
def monthlyDelta():
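        # Walk back month-by-month from today, clamping the day-of-month.
        # e.g. run on 2020-03-31 this yields 2020-03-31, 2020-02-29,
        # 2020-01-31, 2019-12-31, ... (8 months in total).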
monthList = []
date = datetime.now()
for delta in range(0, 8):
m, y = (date.month-delta) % 12, date.year + ((date.month)-delta-1) // 12
if not m: m = 12
            d = min(date.day, [31, 29 if y % 4 == 0 and (y % 100 != 0 or y % 400 == 0) else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][m-1])  # full Gregorian leap rule
monthList.append(date.replace(day=d,month=m, year=y))
return monthList
@staticmethod
def getMongoConnection(database, collection):
port = constant.config['INTOUCH_DB_MONGO_MASTER']
m = MongoHelper(database, collection, port)
constant.config['mongoConn'] = m.mongodb
| [
"[email protected]"
] | |
c4d24445005777c0176e500440d00d9c055d1285 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/Zhone.py | b7c88847cc326607084f9f5d852960f0444cc4eb | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 15,859 | py | #
# PySNMP MIB module Zhone (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Zhone
# Produced by pysmi-0.3.4 at Mon Apr 29 18:11:18 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
TimeTicks, Counter64, ObjectIdentity, Bits, IpAddress, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, enterprises, ModuleIdentity, Counter32, NotificationType, Unsigned32, iso, MibIdentifier, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Counter64", "ObjectIdentity", "Bits", "IpAddress", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "enterprises", "ModuleIdentity", "Counter32", "NotificationType", "Unsigned32", "iso", "MibIdentifier", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
zhone = ModuleIdentity((1, 3, 6, 1, 4, 1, 5504))
zhone.setRevisions(('2011-12-05 16:58', '2011-05-06 00:20', '2010-02-19 10:51', '2009-05-27 02:08', '2008-01-23 11:46', '2007-11-09 13:05', '2007-10-16 10:26', '2007-02-17 13:43', '2006-06-09 12:48', '2005-12-01 14:20', '2004-10-13 14:40', '2004-10-08 11:15', '2004-08-11 15:42', '2004-01-30 13:34', '2003-10-28 11:03', '2003-07-17 14:29', '2002-03-04 15:34', '2001-10-09 12:07', '2000-09-28 16:32', '2000-12-18 16:32', '2000-12-20 17:20', '2001-02-07 17:11', '2001-02-22 11:35', '2001-04-10 14:35', '2001-05-15 10:32', '2001-06-26 17:06', '2001-06-28 13:33', '2001-07-31 08:51', '2001-08-29 16:56', '2001-08-31 15:33',))
if mibBuilder.loadTexts: zhone.setLastUpdated('201112052000Z')
if mibBuilder.loadTexts: zhone.setOrganization('Zhone Technologies')
zhoneRegistrations = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 1))
if mibBuilder.loadTexts: zhoneRegistrations.setStatus('current')
zhoneRegPls = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 1, 1))
if mibBuilder.loadTexts: zhoneRegPls.setStatus('current')
zhoneRegCpe = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 1, 2))
if mibBuilder.loadTexts: zhoneRegCpe.setStatus('current')
zhoneRegMux = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 1, 3))
if mibBuilder.loadTexts: zhoneRegMux.setStatus('current')
zhoneRegSechtor = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 1, 4))
if mibBuilder.loadTexts: zhoneRegSechtor.setStatus('current')
zhoneRegWtn = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 1, 5))
if mibBuilder.loadTexts: zhoneRegWtn.setStatus('current')
zhoneRegMalc = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 1, 6))
if mibBuilder.loadTexts: zhoneRegMalc.setStatus('current')
zhoneProduct = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 2))
if mibBuilder.loadTexts: zhoneProduct.setStatus('current')
zhonePls = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 2, 1))
if mibBuilder.loadTexts: zhonePls.setStatus('current')
zhoneZedge = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 2, 2))
if mibBuilder.loadTexts: zhoneZedge.setStatus('current')
zhoneZplex = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 2, 3))
if mibBuilder.loadTexts: zhoneZplex.setStatus('current')
zhoneSechtor = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 2, 4))
if mibBuilder.loadTexts: zhoneSechtor.setStatus('current')
sechtor100 = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 2, 4, 1))
if mibBuilder.loadTexts: sechtor100.setStatus('current')
sechtor300 = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 2, 4, 2))
if mibBuilder.loadTexts: sechtor300.setStatus('current')
zhoneWtn = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 2, 5))
if mibBuilder.loadTexts: zhoneWtn.setStatus('current')
zhoneMalc = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 2, 6))
if mibBuilder.loadTexts: zhoneMalc.setStatus('current')
zhoneZmsProduct = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 2, 7))
if mibBuilder.loadTexts: zhoneZmsProduct.setStatus('current')
zhoneGeneric = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3))
if mibBuilder.loadTexts: zhoneGeneric.setStatus('current')
zhoneSystem = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 1))
if mibBuilder.loadTexts: zhoneSystem.setStatus('current')
zhoneShelf = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 2))
if mibBuilder.loadTexts: zhoneShelf.setStatus('current')
zhoneCard = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 3))
if mibBuilder.loadTexts: zhoneCard.setStatus('current')
zhoneSubscriber = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 4))
if mibBuilder.loadTexts: zhoneSubscriber.setStatus('current')
zhoneInterfaceTranslation = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 5))
if mibBuilder.loadTexts: zhoneInterfaceTranslation.setStatus('current')
zhoneInterfaceGroup = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 6))
if mibBuilder.loadTexts: zhoneInterfaceGroup.setStatus('current')
zhoneMasterAgent = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 7))
if mibBuilder.loadTexts: zhoneMasterAgent.setStatus('current')
zhoneTrapModules = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 8))
if mibBuilder.loadTexts: zhoneTrapModules.setStatus('current')
zhoneGenWtn = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 9))
if mibBuilder.loadTexts: zhoneGenWtn.setStatus('current')
zhoneZAP = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 10))
if mibBuilder.loadTexts: zhoneZAP.setStatus('current')
zhoneVoiceStats = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 11))
if mibBuilder.loadTexts: zhoneVoiceStats.setStatus('current')
zhoneSFF = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 12))
if mibBuilder.loadTexts: zhoneSFF.setStatus('current')
zhoneInterfaceConfig = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 3, 13))
if mibBuilder.loadTexts: zhoneInterfaceConfig.setStatus('current')
zhoneCommunicationProtocols = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4))
if mibBuilder.loadTexts: zhoneCommunicationProtocols.setStatus('current')
zhoneIp = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1))
if mibBuilder.loadTexts: zhoneIp.setStatus('current')
zhoneAtm = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 2))
if mibBuilder.loadTexts: zhoneAtm.setStatus('current')
zhoneVoice = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 3))
if mibBuilder.loadTexts: zhoneVoice.setStatus('current')
zhoneVoip = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 4))
if mibBuilder.loadTexts: zhoneVoip.setStatus('current')
zhonePpp = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 5))
if mibBuilder.loadTexts: zhonePpp.setStatus('current')
zhoneIma = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 6))
if mibBuilder.loadTexts: zhoneIma.setStatus('current')
zhoneBridge = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 7))
if mibBuilder.loadTexts: zhoneBridge.setStatus('current')
zhoneVideo = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 8))
if mibBuilder.loadTexts: zhoneVideo.setStatus('current')
zhoneIsdn = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 9))
if mibBuilder.loadTexts: zhoneIsdn.setStatus('current')
zhoneCes = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 10))
if mibBuilder.loadTexts: zhoneCes.setStatus('current')
zhoneSs7 = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 11))
if mibBuilder.loadTexts: zhoneSs7.setStatus('current')
zhoneClass5 = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 12))
if mibBuilder.loadTexts: zhoneClass5.setStatus('current')
zhoneBonding = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 13))
if mibBuilder.loadTexts: zhoneBonding.setStatus('current')
zhoneRadius = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 14))
if mibBuilder.loadTexts: zhoneRadius.setStatus('current')
zhoneIua = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 15))
if mibBuilder.loadTexts: zhoneIua.setStatus('current')
zhone802Dot1Mibs = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 16))
if mibBuilder.loadTexts: zhone802Dot1Mibs.setStatus('current')
zhonePtp = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 17))
if mibBuilder.loadTexts: zhonePtp.setStatus('current')
zhonePhysical = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5))
if mibBuilder.loadTexts: zhonePhysical.setStatus('current')
zhoneEnet = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 1))
if mibBuilder.loadTexts: zhoneEnet.setStatus('current')
zhoneDsx = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 2))
if mibBuilder.loadTexts: zhoneDsx.setStatus('current')
zhoneOcx = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 3))
if mibBuilder.loadTexts: zhoneOcx.setStatus('current')
zhoneDsl = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 4))
if mibBuilder.loadTexts: zhoneDsl.setStatus('current')
zhoneConsole = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 5))
if mibBuilder.loadTexts: zhoneConsole.setStatus('current')
zhoneRadio = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 8))
if mibBuilder.loadTexts: zhoneRadio.setStatus('current')
zhoneSonet = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 9))
if mibBuilder.loadTexts: zhoneSonet.setStatus('current')
zhoneDs3Ext = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 10))
if mibBuilder.loadTexts: zhoneDs3Ext.setStatus('current')
zhoneLineTypes = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 11))
if mibBuilder.loadTexts: zhoneLineTypes.setStatus('current')
zhoneApon = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 12))
if mibBuilder.loadTexts: zhoneApon.setStatus('current')
zhoneVdsl = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 13))
if mibBuilder.loadTexts: zhoneVdsl.setStatus('current')
zhoneGpon = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 14))
if mibBuilder.loadTexts: zhoneGpon.setStatus('current')
zhoneWdm = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 15))
if mibBuilder.loadTexts: zhoneWdm.setStatus('current')
zhoneCpe = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 5, 16))
if mibBuilder.loadTexts: zhoneCpe.setStatus('current')
zhoneModules = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 6))
if mibBuilder.loadTexts: zhoneModules.setStatus('current')
zhoneShelfSlotTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 7), )
if mibBuilder.loadTexts: zhoneShelfSlotTable.setStatus('current')
zhoneShelfSlotEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 7, 1), ).setIndexNames((0, "Zhone", "zhoneShelfIndex"), (0, "Zhone", "zhoneSlotIndex"))
if mibBuilder.loadTexts: zhoneShelfSlotEntry.setStatus('current')
zhoneShelfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zhoneShelfIndex.setStatus('current')
zhoneSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 7, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zhoneSlotIndex.setStatus('current')
zhoneCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 5504, 9))
zhoneGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 5504, 9, 1))
zhoneShelfSlotGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5504, 9, 1, 1)).setObjects(("Zhone", "zhoneShelfIndex"), ("Zhone", "zhoneSlotIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
zhoneShelfSlotGroup = zhoneShelfSlotGroup.setStatus('current')
zhoneCompliance = MibIdentifier((1, 3, 6, 1, 4, 1, 5504, 9, 2))
zhoneShelfSlotCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 5504, 9, 2, 1)).setObjects(("Zhone", "zhoneShelfSlotGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
zhoneShelfSlotCompliance = zhoneShelfSlotCompliance.setStatus('current')
zhoneExperimental = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10))
if mibBuilder.loadTexts: zhoneExperimental.setStatus('current')
ietfDrafts = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1))
if mibBuilder.loadTexts: ietfDrafts.setStatus('current')
apsMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 1))
if mibBuilder.loadTexts: apsMIB.setStatus('current')
sipTC = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 2))
if mibBuilder.loadTexts: sipTC.setStatus('current')
sipCommonMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 3))
if mibBuilder.loadTexts: sipCommonMIB.setStatus('current')
sipUAMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 4))
if mibBuilder.loadTexts: sipUAMIB.setStatus('current')
pktcIetfSigMib = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 5))
if mibBuilder.loadTexts: pktcIetfSigMib.setStatus('current')
efmOamMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 6))
if mibBuilder.loadTexts: efmOamMIB.setStatus('current')
efmCuMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 7))
if mibBuilder.loadTexts: efmCuMIB.setStatus('current')
pwTcStdMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 8))
if mibBuilder.loadTexts: pwTcStdMIB.setStatus('current')
ianaPwe3MIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 9))
if mibBuilder.loadTexts: ianaPwe3MIB.setStatus('current')
pwStdMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 10))
if mibBuilder.loadTexts: pwStdMIB.setStatus('current')
pwTDMMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 11))
if mibBuilder.loadTexts: pwTDMMIB.setStatus('current')
zhoneRmonMibModule = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 1, 12))
if mibBuilder.loadTexts: zhoneRmonMibModule.setStatus('current')
zhoneDrafts = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 10, 2))
if mibBuilder.loadTexts: zhoneDrafts.setStatus('current')
mibBuilder.exportSymbols("Zhone", zhoneCpe=zhoneCpe, zhone=zhone, zhoneZAP=zhoneZAP, zhoneIma=zhoneIma, zhoneShelfIndex=zhoneShelfIndex, zhoneWtn=zhoneWtn, zhoneRegWtn=zhoneRegWtn, zhonePhysical=zhonePhysical, zhoneConsole=zhoneConsole, zhoneRegistrations=zhoneRegistrations, zhoneRmonMibModule=zhoneRmonMibModule, zhoneSonet=zhoneSonet, zhoneIua=zhoneIua, zhoneCompliance=zhoneCompliance, zhoneSs7=zhoneSs7, zhoneDsl=zhoneDsl, zhoneRegPls=zhoneRegPls, ietfDrafts=ietfDrafts, zhonePpp=zhonePpp, zhone802Dot1Mibs=zhone802Dot1Mibs, zhoneGroups=zhoneGroups, zhoneProduct=zhoneProduct, sechtor300=sechtor300, zhonePtp=zhonePtp, zhoneAtm=zhoneAtm, zhoneEnet=zhoneEnet, sechtor100=sechtor100, zhoneDrafts=zhoneDrafts, zhoneShelf=zhoneShelf, zhonePls=zhonePls, zhoneModules=zhoneModules, zhoneShelfSlotEntry=zhoneShelfSlotEntry, zhoneSechtor=zhoneSechtor, zhoneApon=zhoneApon, zhoneZmsProduct=zhoneZmsProduct, apsMIB=apsMIB, zhoneSFF=zhoneSFF, sipTC=sipTC, zhoneRegCpe=zhoneRegCpe, zhoneClass5=zhoneClass5, zhoneCes=zhoneCes, zhoneCard=zhoneCard, zhoneVideo=zhoneVideo, zhoneRegMalc=zhoneRegMalc, zhoneCompliances=zhoneCompliances, zhoneVdsl=zhoneVdsl, zhoneSystem=zhoneSystem, zhoneInterfaceGroup=zhoneInterfaceGroup, zhoneShelfSlotTable=zhoneShelfSlotTable, pwTcStdMIB=pwTcStdMIB, zhoneInterfaceTranslation=zhoneInterfaceTranslation, pktcIetfSigMib=pktcIetfSigMib, zhoneVoip=zhoneVoip, zhoneMalc=zhoneMalc, zhoneShelfSlotCompliance=zhoneShelfSlotCompliance, zhoneShelfSlotGroup=zhoneShelfSlotGroup, zhoneBridge=zhoneBridge, sipUAMIB=sipUAMIB, zhoneGeneric=zhoneGeneric, zhoneVoiceStats=zhoneVoiceStats, zhoneExperimental=zhoneExperimental, zhoneDs3Ext=zhoneDs3Ext, zhoneSlotIndex=zhoneSlotIndex, zhoneVoice=zhoneVoice, zhoneIsdn=zhoneIsdn, zhoneMasterAgent=zhoneMasterAgent, zhoneLineTypes=zhoneLineTypes, PYSNMP_MODULE_ID=zhone, efmOamMIB=efmOamMIB, zhoneRegMux=zhoneRegMux, zhoneBonding=zhoneBonding, pwStdMIB=pwStdMIB, zhoneOcx=zhoneOcx, zhoneZplex=zhoneZplex, zhoneCommunicationProtocols=zhoneCommunicationProtocols, zhoneTrapModules=zhoneTrapModules, zhoneRadius=zhoneRadius, ianaPwe3MIB=ianaPwe3MIB, zhoneWdm=zhoneWdm, efmCuMIB=efmCuMIB, zhoneInterfaceConfig=zhoneInterfaceConfig, sipCommonMIB=sipCommonMIB, zhoneZedge=zhoneZedge, zhoneGpon=zhoneGpon, zhoneIp=zhoneIp, zhoneSubscriber=zhoneSubscriber, zhoneGenWtn=zhoneGenWtn, zhoneDsx=zhoneDsx, zhoneRadio=zhoneRadio, pwTDMMIB=pwTDMMIB, zhoneRegSechtor=zhoneRegSechtor)
| [
"[email protected]"
] | |
46d063dfbd564d8ef90b175f6753adb82a364ec0 | fc3d16b7a195652d4276d3112c8be856bd908f9a | /news_app/source_class.py | 9c72380aa29892333fd4d9981523f34d8ab838cd | [
"MIT"
] | permissive | petermirithu/pyra-s_news_centre | eb0f1735a62763a86c289facabc3985c1398ad6c | c8726f2db2d007b5584685a969d66df41be50ba5 | refs/heads/master | 2021-06-30T22:28:51.639786 | 2019-11-18T14:27:03 | 2019-11-18T14:27:03 | 221,890,761 | 0 | 0 | MIT | 2021-03-20T02:11:35 | 2019-11-15T09:31:49 | Python | UTF-8 | Python | false | false | 212 | py | class Source:
    '''
    Source class defining the shape of a news source.
    '''
    def __init__(self, id, name, language, country):
        self.id = id
        self.name = name
        self.language = language
        self.country = country
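
# Usage sketch (hypothetical values):
#   src = Source("bbc-news", "BBC News", "en", "gb")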
| [
"[email protected]"
] | |
f2893378baeddcab8d17ddf72b21c3ed6cd59617 | 2a70521e76564ff14c63100adaecc87fee40f8f4 | /profiles/views.py | c6213b66adf442c56a31d9681a5f5d72b15c6348 | [] | no_license | Telling/bornhack-website | 6f6bef9ea632c675b1a7e10dae69acd00def0d42 | 18a9ae27867c046a2b9fac46aa886c2788b139e7 | refs/heads/master | 2020-12-01T11:48:34.633206 | 2016-08-03T12:06:46 | 2016-08-03T12:06:46 | 64,944,894 | 1 | 0 | null | 2016-08-04T15:14:34 | 2016-08-04T15:14:34 | null | UTF-8 | Python | false | false | 1,147 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import DetailView, UpdateView
from django.core.urlresolvers import reverse_lazy
from django.contrib import messages
from . import models, forms
class ProfileDetail(LoginRequiredMixin, DetailView):
model = models.Profile
def get_object(self, queryset=None):
return models.Profile.objects.get(user=self.request.user)
class ProfileUpdate(LoginRequiredMixin, UpdateView):
model = models.Profile
form_class = forms.ProfileForm
success_url = reverse_lazy('profiles:detail')
def get_object(self, queryset=None):
return models.Profile.objects.get(user=self.request.user)
def get_form_kwargs(self):
kwargs = super(ProfileUpdate, self).get_form_kwargs()
kwargs['initial'] = {'email': self.object.user.email}
return kwargs
def form_valid(self, form, **kwargs):
self.object.user.email = form.cleaned_data['email']
self.object.user.save()
messages.info(self.request, 'Your profile has been updated.')
return super(ProfileUpdate, self).form_valid(form, **kwargs)
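
# Hypothetical URL wiring for these views (names assumed, not from this repo):
# urlpatterns = [
#     url(r'^profile/$', ProfileDetail.as_view(), name='detail'),
#     url(r'^profile/edit/$', ProfileUpdate.as_view(), name='update'),
# ]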
| [
"[email protected]"
] | |
2d103b89fc1095dd48685e70e5488433471a6d7c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03671/s288171235.py | 5a407677fafb20f7d42eccf5001ae17587259711 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | a,b,c=map(int,input().split())
if a<b<c:
print(a+b)
elif a<c<b:
print(a+c)
elif b<a<c:
print(b+a)
elif b<c<a:
print(b+c)
elif c<a<b:
print(c+a)
else:
print(c+b) | [
"[email protected]"
] | |
1f4dc6b15a35654f4fd7cbffe57fb09b08ec4292 | 565ae8473c545c43341f5511b9633e97f0e4da8b | /course3_python_advanced/Advanced EXAM/PRACTICE/2020.02.18 - 18 feb - 300/02_book_worm_ab_40%-100%.py | 5ffe46f0b9e7b363acc9cf6207cf7537dfacd5ae | [] | no_license | andriiburka/Web-Development-with-Python | 3934c1a3945bd983ab39d38b97f1af16fe784207 | b6927653a2c6a9cc10a8768395233e347624c49a | refs/heads/master | 2022-11-21T21:42:04.898254 | 2020-07-29T22:59:56 | 2020-07-29T22:59:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,704 | py | from collections import deque
word = input()
matrix = deque([list(input()) for row in range(int(input()))])
p_position = [[row, col] for row in range(len(matrix)) for col in range(len(matrix[row])) if matrix[row][col] == 'P']
r, c = p_position[0]
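# Movement loop: 'P' walks the field picking up any letter it steps onto;
# stepping off the edge removes the last collected letter instead (the
# word[:-1] branches). Column bounds reuse len(matrix), i.e. the field is
# assumed square, as in the task.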
for i in range(int(input())):
cmd = input()
if cmd == 'up':
if r - 1 > -1:
tmp = matrix[r][c]
matrix[r][c] = '-'
if matrix[r - 1][c].isalpha():
word += matrix[r - 1][c]
matrix[r - 1][c] = tmp
r -= 1
continue
elif r - 1 < 0:
word = word[:-1]
elif cmd == 'down':
if r + 1 < len(matrix):
tmp = matrix[r][c]
matrix[r][c] = '-'
if matrix[r + 1][c].isalpha():
word += matrix[r + 1][c]
matrix[r + 1][c] = tmp
r += 1
continue
elif r + 1 > len(matrix) - 1:
word = word[:-1]
elif cmd == 'left':
if c - 1 >= 0:
tmp = matrix[r][c]
matrix[r][c] = '-'
if matrix[r][c - 1].isalpha():
word += matrix[r][c - 1]
matrix[r][c - 1] = tmp
c -= 1
continue
elif c - 1 < 0:
word = word[:-1]
elif cmd == 'right':
if c + 1 < len(matrix):
tmp = matrix[r][c]
matrix[r][c] = '-'
if matrix[r][c + 1].isalpha():
word += matrix[r][c + 1]
matrix[r][c + 1] = tmp
c += 1
continue
elif c + 1 > len(matrix) - 1:
word = word[:-1]
print(word)
[print("".join(row)) for row in matrix] | [
"[email protected]"
] | |
2f92923a11d152fe876581c9aa12ada7aca0867d | 0309bd25cdd8e89297f507be202634b07f5f6e85 | /LeetCode/Easy/Python3/tests/test_findpivotindex.py | 58071d2723b3c74b2a0b101e3193af71ebb68f08 | [] | no_license | AmyShackles/algo-practice | 10fc4a5c5926232ff2b0aed6183cec9f21bf15f3 | 876e3be57357651348465f70ab312d4ac98d667a | refs/heads/main | 2023-04-03T08:29:33.725236 | 2021-04-13T19:20:18 | 2021-04-13T19:20:18 | 338,672,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | import unittest
from Python3.findpivotindex import Solution
class TestpivotIndex(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_1(self):
# For sanity checking:
# Input: nums = [1,7,3,6,5,6]
# Output: 3
self.assertEqual(Solution.pivotIndex([1, 7, 3, 6, 5, 6]), 3)
def test_2(self):
# For sanity checking:
# Input: nums = [1,2,3]
# Output: -1
self.assertEqual(Solution.pivotIndex([1, 2, 3]), -1)
def test_3(self):
# For sanity checking:
# Input: nums = [2,1,-1]
# Output: 0
self.assertEqual(Solution.pivotIndex([2, 1, -1]), 0)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
1323a5fcd18009582e6eb70f125a050d19ab596d | 781e2692049e87a4256320c76e82a19be257a05d | /assignments/python/wc/src/934.py | 5bd1ce553337e88919cc9f3a7dad7be6a3849492 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 493 | py | import collections
from collections import Counter
def word_count(word):
    # Counting over split() replaces the original character-by-character scan,
    # which used Python 2 print statements and appended an empty string when
    # the input ended in whitespace; split() handles spaces and newlines and
    # never yields "".
    cnt = Counter()
    for w in word.split():
        cnt[w] += 1
    return cnt
print(word_count("yo"))
| [
"[email protected]"
] | |
6139dcf79ba85c68ed2f324e8a92a94503733e64 | 5fc8864b934ae90b438375b4c705ed2d350d4afc | /sheep/utils/qiniu/cache.py | 4f26ff0ef542572342e77f5ed77515efe23546f6 | [] | no_license | L-BraveDog/sheep | 71a1aabfc756a8458055c52936d2713f9aab24c8 | 4d6eda7c6358571d6680a1f2a2949ee3ac4220e7 | refs/heads/master | 2023-07-29T07:43:24.164143 | 2020-08-12T11:33:05 | 2020-08-12T11:33:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | # -*- coding: utf-8 -*-
# author:CY
# datetime:2020/7/22 15:48
import json
import hashlib
from rest_framework.response import Response
from rest_framework_extensions.cache.decorators import CacheResponse
class QiNiuCacheResponse(CacheResponse):
def process_cache_response(self,
view_instance,
view_method,
request,
args,
kwargs):
key = self.generate_key(request)
data = self.cache.get(key)
if not data:
response = view_method(view_instance, request, *args, **kwargs)
if not response.status_code >= 400 or self.cache_errors:
self.cache.set(key, response.data, self.timeout)
else:
response = Response(data=data)
if not hasattr(response, '_closable_objects'):
response._closable_objects = []
return response
@staticmethod
def generate_key(request):
""" 加密影响性能.去除"""
# hl = hashlib.md5()
# hl.update(json.dumps(request.data).encode(encoding='utf-8'))
# return hl.hexdigest()
return f'qiniu_{request.data.get("bucket", None)}'
qi_niu_cache_response = QiNiuCacheResponse | [
"[email protected]"
] | |
12f32d4f3f4bb1de9257cc030b7cddf990cc172f | 500bca3e22bd0c30c79b74918e9847742b3c428e | /cli/jobs/pipelines/add-column-and-word-count-using-spark/src/add_greeting_column.py | 2afc93333d7b05cbae3195d47f1536e7fd2d811b | [
"MIT"
] | permissive | Azure/azureml-examples | 2304c862fd2e36e6640ecc4d09f69c5ed93b48ab | e5f7b247d4753f115a8f7da30cbe25294f71f9d7 | refs/heads/main | 2023-08-31T00:10:14.107509 | 2023-08-30T17:29:22 | 2023-08-30T17:29:22 | 289,334,021 | 1,219 | 1,074 | MIT | 2023-09-14T16:00:55 | 2020-08-21T18:04:26 | Jupyter Notebook | UTF-8 | Python | false | false | 474 | py | from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
import argparse
from utils import util
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
parser = argparse.ArgumentParser()
parser.add_argument("--file_input")
args = parser.parse_args()
greeting_udf = udf(util.greeting)
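# note: udf() without an explicit returnType defaults to StringType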
df = spark.read.option("header", "true").csv(args.file_input)
df = df.withColumn("greeting", greeting_udf(df.species))
df.show()  # show() returns None, so it must not be assigned back to df
print(sc.getConf().getAll())
| [
"[email protected]"
] | |
9a13e896e1e705299c0e6cce39a6869ed0000f39 | 647b5eb4bdcd8cbc903a8576cf50385219905a00 | /euler/python/35.py | d2011060dfb1f11b21f1171e68d13ee14ff79cb1 | [] | no_license | kradalby/programming-tasks | 149ef4a62903a940c7297196e6984e17dee28011 | 96685634a8fea87cacda3e75be377383ac67a0ef | refs/heads/master | 2021-04-29T10:17:21.707426 | 2020-01-09T17:16:52 | 2020-01-09T17:16:52 | 77,645,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | from math import floor, sqrt
def is_prime(n):
if n == 2:
return True
elif n == 1:
return False
elif n < 4:
return True
elif n % 2 == 0:
return False
elif n < 9:
return True
elif n % 3 == 0:
return False
else:
r = floor(sqrt(n))
f = 5
while f <= r:
if n % f == 0:
return False
if n % (f+2) == 0:
return False
f += 6
return True
def get_primes(n):
primes = []
for i in range(1,n+1):
if is_prime(i):
primes.append(i)
return primes
def is_circular_prime(n):
ns = str(n)
for i in range(len(ns)):
        rotated = ns[1:] + ns[:1]
        if not is_prime(int(rotated)):
            return False
        ns = rotated  # per-candidate debug print removed
return True
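# e.g. 197 is circular: 197 -> 971 -> 719 are all prime.
# Known Project Euler 35 result: 55 circular primes below one million.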
primes = get_primes(1000000)
print(len(primes))
circ = []
for i in primes:
if is_circular_prime(i):
circ.append(i)
print(len(circ))
| [
"[email protected]"
] | |
eb182742ba237c0829e394079c6126094edb1ed2 | 119a85a388fe436361530fbb47932e704d749557 | /PEAK-0.5a4dev_r2085/build/lib.macosx-10.6-x86_64-2.7/peak/metamodels/UML13/model/Behavioral_Elements/Use_Cases.py | efb8d979100cb09f4d8b6afecba259544f901264 | [
"Python-2.0"
] | permissive | chrisrgunn/cs156project | 014d5b05c6bf0e08ab8bd0dea525057d0e65b9a7 | e5414a37f9793c8b0674695b948482b559b18ea6 | refs/heads/master | 2021-01-19T14:09:49.046539 | 2017-05-24T02:10:29 | 2017-05-24T02:10:29 | 88,128,762 | 0 | 2 | null | 2017-05-04T23:49:09 | 2017-04-13T05:36:10 | Python | UTF-8 | Python | false | false | 3,294 | py | # ------------------------------------------------------------------------------
# Package: peak.metamodels.UML13.model.Behavioral_Elements.Use_Cases
# File: peak\metamodels\UML13\model\Behavioral_Elements\Use_Cases.py
# ------------------------------------------------------------------------------
from peak.util.imports import lazyModule as _lazy
_model = _lazy('peak.model.api')
#_config = _lazy('peak.config.api')
Core = _lazy(__name__, '../../Foundation/Core')
Common_Behavior = _lazy(__name__, '../Common_Behavior')
# ------------------------------------------------------------------------------
class UseCase(Core.Classifier):
class extend(_model.StructuralFeature):
referencedType = 'Extend'
referencedEnd = 'extension'
sortPosn = 0
class extend2(_model.StructuralFeature):
referencedType = 'Extend'
referencedEnd = 'base'
sortPosn = 1
class include(_model.StructuralFeature):
referencedType = 'Include'
referencedEnd = 'addition'
sortPosn = 2
class include2(_model.StructuralFeature):
referencedType = 'Include'
referencedEnd = 'base'
sortPosn = 3
class extensionPoint(_model.StructuralFeature):
referencedType = 'ExtensionPoint'
referencedEnd = 'useCase'
sortPosn = 4
class Actor(Core.Classifier):
pass
class UseCaseInstance(Common_Behavior.Instance):
pass
class Extend(Core.Relationship):
class condition(_model.StructuralFeature):
referencedType = 'Foundation/Data_Types/BooleanExpression'
upperBound = 1
lowerBound = 1
sortPosn = 0
class base(_model.StructuralFeature):
referencedType = 'UseCase'
referencedEnd = 'extend2'
upperBound = 1
lowerBound = 1
sortPosn = 1
class extension(_model.StructuralFeature):
referencedType = 'UseCase'
referencedEnd = 'extend'
upperBound = 1
lowerBound = 1
sortPosn = 2
class extensionPoint(_model.StructuralFeature):
referencedType = 'ExtensionPoint'
referencedEnd = 'extend'
lowerBound = 1
sortPosn = 3
class Include(Core.Relationship):
class addition(_model.StructuralFeature):
referencedType = 'UseCase'
referencedEnd = 'include'
upperBound = 1
lowerBound = 1
sortPosn = 0
class base(_model.StructuralFeature):
referencedType = 'UseCase'
referencedEnd = 'include2'
upperBound = 1
lowerBound = 1
sortPosn = 1
class ExtensionPoint(Core.ModelElement):
class location(_model.StructuralFeature):
referencedType = 'Foundation/Data_Types/LocationReference'
upperBound = 1
lowerBound = 1
sortPosn = 0
class useCase(_model.StructuralFeature):
referencedType = 'UseCase'
referencedEnd = 'extensionPoint'
upperBound = 1
lowerBound = 1
sortPosn = 1
class extend(_model.StructuralFeature):
referencedType = 'Extend'
referencedEnd = 'extensionPoint'
sortPosn = 2
# ------------------------------------------------------------------------------
#_config.setupModule()
| [
"[email protected]"
] | |
fed5d4acae7285a1eebaad8d868f82fae5a50334 | 4b4df51041551c9a855468ddf1d5004a988f59a2 | /leetcode_python/Bit_Manipulation/binary-number-with-alternating-bits.py | 54a6df3e0278a36e092633aa43846cee89ba652e | [] | no_license | yennanliu/CS_basics | 99b7ad3ef6817f04881d6a1993ec634f81525596 | 035ef08434fa1ca781a6fb2f9eed3538b7d20c02 | refs/heads/master | 2023-09-03T13:42:26.611712 | 2023-09-03T12:46:08 | 2023-09-03T12:46:08 | 66,194,791 | 64 | 40 | null | 2022-08-20T09:44:48 | 2016-08-21T11:11:35 | Python | UTF-8 | Python | false | false | 1,643 | py | # V0
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/79089937
# IDEA : GREEDY
# TO CHECK IF 0 ALWAYS NEXT/BEFORE 1 (1 ALWAYS NEXT/BEFORE 0)
# IDEA : ALL FUN IN PYTHON
# https://www.programiz.com/python-programming/methods/built-in/all
# Return Value from all()
# The all() method returns:
# True - If all elements in an iterable are true
# False - If any element in an iterable is false
class Solution(object):
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
bin_n = bin(n)[2:]
return all(bin_n[i] != bin_n[i+1] for i in range(len(bin_n) - 1))
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/79089937
# IDEA : PATTERN
class Solution(object):
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
b = 0b1010101010101010101010101010101010101010101010101010101010101010
while b > 0:
if b == n:
return True
b = b >> 1
return False
# V1''
# https://blog.csdn.net/fuxuemingzhu/article/details/79089937
# IDEA : BIT MANIPULATION
class Solution(object):
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
n ^= (n >> 1)
return not (n & n + 1)
# V2
# Time: O(1)
# Space: O(1)
class Solution(object):
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
n, curr = divmod(n, 2)
while n > 0:
if curr == n % 2:
return False
n, curr = divmod(n, 2)
return True
| [
"[email protected]"
] | |
614ff2ef1cd74d9d05909ec0c30ad60bffdc6e0e | 30291450c064006f1bd9bc5c432b8a869e2166bb | /tags/1.3/zhpy/info.py | 11b9f7e04ad0e89c86cb8dab2c053353bdebcd40 | [
"MIT"
] | permissive | BGCX261/zhpy-svn-to-git | 96f04e2f72c61671324219a85939137ff5cd9ef6 | 70da095393fe13543433ab5115cb6c1a519d64b0 | refs/heads/master | 2021-01-22T22:49:04.898314 | 2015-08-25T15:44:00 | 2015-08-25T15:44:00 | 41,587,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,498 | py | """
zhpy package and plugin information
This is the MIT license:
http://www.opensource.org/licenses/mit-license.php
Copyright (c) 2007 Fred Lin and contributors. zhpy is a trademark of Fred Lin.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import pkg_resources
import sys
entrypoints = {"Traditional Chinese Keywords":"plugtw.tools",
"Simplified Chinese Keywords":"plugcn.tools"}
def retrieve_info():
"""
retrieve package and plugins info
"""
packages=['%s' % i for i in pkg_resources.require("zhpy")]
#plugins = {}
#for name, pointname in entrypoints.items():
# plugins[name] = ["%s (%s) - %d" % (entrypoint.name, \
# str(entrypoint.dist), \
# len(entrypoint.load().keyword))
# for entrypoint in pkg_resources.iter_entry_points(pointname)
# ]
return packages#, plugins
def info():
"""
show zhpy informations including version and plugins
ported from TurboGears2 tginfo command
"""
print """
Complete zhpy Version Information
zhpy requires:
"""
print " * python",sys.version.split()[0]
#packages, plugins = retrieve_info()
packages = retrieve_info()
for p in packages:
print ' *', p
# print """\nzhpy extends:"""
# for name, pluginlist in plugins.items():
# print "\n", name, "\n"
# for plugin in pluginlist:
# print ' *', plugin
print "" | [
"[email protected]"
] | |
8a01907224c2e522b023d5e25c32cc5d0c980401 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/pyinstaller/PyInstaller/hooks/pre_find_module_path/hook-distutils.py | 501afb283c000e166bbb745cdda79f4e8589c51b | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a57a0c2455f448f73674516e52b7af5089c4865777cbdaec011240a51f08272c
size 1744
| [
"[email protected]"
] | |
e13c7088fa38705604685a9e8160543ba1c225f3 | e039a739bb0580befef599bb71c72e64838b9924 | /Exp_Hyperparams/run_dSVDD.py | ff99d808a0a13d4aa4511e1c18879c79e15b83aa | [] | no_license | ddatta-DAC/AD_7 | ab16785f543b8390731cab3195921ca6cbbc4f0a | e894b01712e6ad66f6b5715fcb0afa94a49ccf94 | refs/heads/master | 2023-04-16T15:07:09.274543 | 2021-04-27T16:12:40 | 2021-04-27T16:12:40 | 290,634,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,294 | py | import torch
import random
import numpy as np
import os
import sys
import pandas as pd
sys.path.append('../../../.')
sys.path.append('../../.')
sys.path.append('../')
import yaml
from tqdm import tqdm
import argparse
from joblib import Parallel, delayed
try:
from .deepsvdd import deepsvdd as deepsvdd
except:
import deepsvdd as deepsvdd
try:
from .deepsvdd import base
except:
from deepsvdd import base
try:
from .deepsvdd import optim
except:
from deepsvdd import optim
try:
from .deepsvdd import *
except:
from deepsvdd import *
try:
from .deepsvdd.networks.AE import FC_dec
    from .deepsvdd.networks.AE import FC_enc  # path fixed to match the absolute import in the fallback below
from .deepsvdd.deepSVDD import DeepSVDD
except:
from deepsvdd.networks.AE import FC_dec
from deepsvdd.networks.AE import FC_enc
from deepsvdd.deepSVDD import DeepSVDD
try:
from eval import eval
except:
from .eval import eval
try:
from . import logger_utils
except:
import logger_utils
try:
from .data_fetcher_v2 import data_fetcher
except:
from data_fetcher_v2 import data_fetcher
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Device ::', DEVICE)
def train_model(
data_dict,
config,
objective='soft-boundary',
nu = 0.01
):
global DEVICE
layer_dims = config['layer_dims']
LR = config['LR']
num_epochs = config['num_epochs']
batch_size = config['batch_size']
warm_up_epochs = config['warm_up_epochs']
ae_epochs = config['ae_epochs']
train_X = data_dict['train']
fc_layer_dims = [train_X.shape[1]] + list(layer_dims)
# Initialize DeepSVDD model and set neural network \phi
deep_SVDD = DeepSVDD(
DEVICE,
objective=objective,
nu = nu
)
deep_SVDD.set_network(fc_layer_dims)
# Train model on dataset
deep_SVDD.train(
train_X,
LR = LR,
num_epochs = num_epochs,
batch_size= batch_size,
ae_epochs = ae_epochs,
warm_up_epochs=warm_up_epochs
)
return deep_SVDD
def test_eval(model_obj, data_dict, num_anomaly_sets):
test_X = data_dict['test']
test_scores = model_obj.test(test_X)
auc_list = []
for idx in range(num_anomaly_sets):
key = 'anom_' + str(idx + 1)
anom_X = data_dict[key]
anom_scores = model_obj.test(anom_X)
auPR = eval.eval(anom_scores, test_scores, order='descending')
auc_list.append(auPR)
print("AUC : {:0.4f} ".format(auPR))
_mean = np.mean(auc_list)
_std = np.std(auc_list)
print(' Mean AUC ', np.mean(auc_list))
print(' AUC std', np.std(auc_list))
return _mean, _std
def execute(DATA_SET, nu, objective, id, config, anom_perc, num_anomaly_sets):
data_dict, _ = data_fetcher.get_data(
DATA_SET,
set_id=id,
num_anom_sets=num_anomaly_sets,
anomaly_perc=anom_perc
)
model_obj = train_model(data_dict, config = config, nu=nu, objective = objective)
mean_aupr, std = test_eval(model_obj, data_dict, num_anomaly_sets)
return (mean_aupr, std)
parser = argparse.ArgumentParser(description='Run the model ')
parser.add_argument(
'--DATA_SET',
type=str,
help=' Which data set ?',
default='kddcup',
choices=['kddcup', 'kddcup_neptune', 'nsl_kdd', 'nb15','gureKDD']
)
parser.add_argument(
'--num_runs',
type=int,
default=1,
help='Number of runs'
)
parser.add_argument(
'--anom_perc',
type=int,
help='Percentage of anomalies',
default=None
)
parser.add_argument(
'--objective',
type=str,
default = 'one-class',
help='objective',
choices=['one-class', 'soft-boundary']
)
# =========================================
args = parser.parse_args()
DATA_SET = args.DATA_SET
num_runs = args.num_runs
LOG_FILE = 'log_results_{}.txt'.format(DATA_SET)
LOGGER = logger_utils.get_logger(LOG_FILE,'deepSVDD')
LOGGER.info(DATA_SET)
config_file = 'config.yaml'
anom_perc = args.anom_perc
with open(config_file, 'r') as fh:
config = yaml.safe_load(fh)
num_anomaly_sets = config[DATA_SET]['num_anomaly_sets']
anomaly_ratio = config[DATA_SET]['anomaly_ratio']
step = 0.025
nu_values = np.arange(0.025,0.2+step,step)
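# nu roughly upper-bounds the fraction of training points allowed to fall
# outside the hypersphere under the soft-boundary objective; the sweep below
# probes how sensitive AuPR is to that choice.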
nu_vs_auc = []
objective = args.objective
if anom_perc is None:
anom_perc = 100 * anomaly_ratio/(1+anomaly_ratio)
LOGGER.info(' Setting anomaly percentage to {} %'.format(anom_perc))
LOGGER.info('Setting objective to {}'.format(objective))
model_config = config[DATA_SET]['dsvdd']
for nu in nu_values:
LOGGER.info('Setting nu :: {}'.format(nu))
_res_ = Parallel(n_jobs=num_runs)(delayed(execute)(
DATA_SET, nu, objective, id, model_config, anom_perc, num_anomaly_sets ) for id in range(1,num_runs+1)
)
results = np.array(_res_)
mean_all_runs = np.mean(results[:,0])
_std = np.std(results[:,0])
LOGGER.info(' Runs {}: Mean: {:4f} | Std {:4f}'.format(num_runs, mean_all_runs, _std))
print('Mean AuPR over {} runs {:4f}'.format(num_runs, mean_all_runs))
print('Details: ', results[:,0])
nu_vs_auc.append((nu, mean_all_runs))
nu_vs_auc = np.array(nu_vs_auc)
LOGGER.info('nu vs AuPR '+ str(nu_vs_auc[:,0]) + str(nu_vs_auc[:,1]))
logger_utils.close_logger(LOGGER)
| [
"[email protected]"
] | |
df961abfa16708fa9957e155b7ca530bd8114e97 | 79bb7105223895235263fd391906144f9f9645fd | /python/framework/ops_test.py | 859f6c24a322a6864de3635c1e792c81200ef965 | [] | no_license | ml-lab/imcl-tensorflow | f863a81bfebe91af7919fb45036aa05304fd7cda | 54ab3ec2e32087ce70ecae2f36b56a8a92f2ba89 | refs/heads/master | 2021-01-22T06:37:18.129405 | 2016-06-08T15:53:28 | 2016-06-08T15:53:28 | 63,518,098 | 1 | 2 | null | 2016-07-17T06:29:14 | 2016-07-17T06:29:13 | null | UTF-8 | Python | false | false | 57,911 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import constant_op
# Import gradients to register _IndexedSlicesToTensor.
import tensorflow.python.ops.gradients # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TensorTest(test_util.TensorFlowTestCase):
def testShape(self):
op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(),
[], [dtypes.float32])
t = op.outputs[0]
self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
t.set_shape([1, 2, 3])
self.assertEqual([1, 2, 3], t.get_shape())
def testIterable(self):
op = ops.Operation(
ops._NodeDef("noop", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertTrue(isinstance(t, ops.Tensor))
with self.assertRaisesRegexp(TypeError, "not iterable"):
for _ in t:
pass
class SparseTensorTest(test_util.TensorFlowTestCase):
def testPythonConstruction(self):
indices = [[1, 2], [2, 0], [3, 4]]
values = [b"a", b"b", b"c"]
shape = [4, 5]
sp_value = ops.SparseTensorValue(indices, values, shape)
for sp in [
ops.SparseTensor(indices, values, shape),
ops.SparseTensor.from_value(sp_value)]:
self.assertEqual(sp.indices.dtype, dtypes.int64)
self.assertEqual(sp.values.dtype, dtypes.string)
self.assertEqual(sp.shape.dtype, dtypes.int64)
with self.test_session() as sess:
value = sp.eval()
self.assertAllEqual(indices, value.indices)
self.assertAllEqual(values, value.values)
self.assertAllEqual(shape, value.shape)
sess_run_value = sess.run(sp)
self.assertAllEqual(sess_run_value.indices, value.indices)
self.assertAllEqual(sess_run_value.values, value.values)
self.assertAllEqual(sess_run_value.shape, value.shape)
class IndexedSlicesTest(test_util.TensorFlowTestCase):
def testToTensor(self):
with self.test_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
dense_shape = constant_op.constant([3, 2])
x = ops.IndexedSlices(values, indices, dense_shape)
tensor = ops.convert_to_tensor(x, name="tensor")
self.assertAllEqual(tensor.eval(), [[2, 3], [0, 0], [5, 7]])
def testNegation(self):
with self.test_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = -ops.IndexedSlices(values, indices)
self.assertAllEqual(x.values.eval(), [[-2, -3], [-5, -7]])
self.assertAllEqual(x.indices.eval(), [0, 2])
def testScalarMul(self):
with self.test_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = math_ops.scalar_mul(-2, ops.IndexedSlices(values, indices))
self.assertAllEqual(x.values.eval(), [[-4, -6], [-10, -14]])
self.assertAllEqual(x.indices.eval(), [0, 2])
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
def testNoArgs(self):
nodedef = ops._NodeDef("noop", "bar")
self.assertProtoEquals("op: 'noop' name: 'bar'", nodedef)
def testArgs(self):
nodedef = ops._NodeDef("foo", "bar", device="/device:baz:*")
self.assertProtoEquals("op:'foo' name:'bar' device:'/device:baz:*'",
nodedef)
nodedef = ops._NodeDef("foo", "bar", device=pydev.DeviceSpec(job="j"))
self.assertProtoEquals("op:'foo' name:'bar' device:'/job:j'", nodedef)
# NOTE(mrry): Dummy shape registrations for ops used in the tests.
ops.RegisterShape("a")(None)
ops.RegisterShape("b")(None)
ops.RegisterShape("c")(None)
ops.RegisterShape("add")(None)
ops.RegisterShape("an_op")(None)
ops.RegisterShape("const")(None)
ops.RegisterShape("copy")(None)
ops.RegisterShape("foo")(None)
ops.RegisterShape("identity")(None)
ops.RegisterShape("mul")(None)
ops.RegisterShape("nonrefop")(None)
ops.RegisterShape("noop")(None)
ops.RegisterShape("refop")(None)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
class OperationTest(test_util.TensorFlowTestCase):
def testNoInputs(self):
op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(),
[],
[dtypes.float32, dtypes.string])
self.assertEqual(2, len(op.values()))
self.assertEqual(0, len(op.inputs))
self.assertEqual("myop", op.name)
float_t, label_str_t = op.values()
self.assertEqual(dtypes.float32, float_t.dtype)
self.assertEqual(op, float_t.op)
self.assertEqual(0, float_t._value_index)
self.assertEqual(0, len(float_t._consumers))
self.assertEqual("myop", float_t._as_node_def_input())
self.assertEqual(dtypes.string, label_str_t.dtype)
self.assertEqual(op, label_str_t.op)
self.assertEqual(1, label_str_t._value_index)
self.assertEqual(0, len(label_str_t._consumers))
self.assertEqual("myop:1", label_str_t._as_node_def_input())
self.assertProtoEquals("op:'noop' name:'myop'", op.node_def)
def testNoOutputs(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("noop", "myop1"), g, [], [dtypes.float32])
float_t, = op1.values()
op2 = ops.Operation(ops._NodeDef("reop", "myop2"), g, [float_t], [])
self.assertEqual(0, len(op2.values()))
self.assertEqual(1, len(op2.inputs))
self.assertIs(float_t, op2.inputs[0])
self.assertEqual(1, len(float_t._consumers))
self.assertEqual(op2, float_t._consumers[0])
self.assertProtoEquals("op:'noop' name:'myop1'", op1.node_def)
self.assertProtoEquals("op:'reop' name:'myop2' input:'myop1'",
op2.node_def)
def testInputsAndOutputs(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("noop", "myop1"), g, [], [dtypes.float32])
self.assertEqual(1, len(op1.values()))
float1_t, = op1.values()
op2 = ops.Operation(ops._NodeDef("reop", "myop2"), g,
[], [dtypes.float32, dtypes.string])
self.assertEqual(2, len(op2.values()))
float2_t, label2_str_t = op2.values()
# Note that we consume label2_str_t twice here.
op3 = ops.Operation(ops._NodeDef("add", "myop3"), g,
[float1_t, label2_str_t, label2_str_t],
[dtypes.float32, dtypes.int32])
self.assertEqual(2, len(op3.values()))
self.assertEqual(1, len(float1_t._consumers))
self.assertEqual(op3, float1_t._consumers[0])
self.assertEqual(0, len(float2_t._consumers))
self.assertEqual(2, len(label2_str_t._consumers))
self.assertEqual(op3, label2_str_t._consumers[0])
self.assertEqual(op3, label2_str_t._consumers[1])
self.assertProtoEquals("""
op:'add' name:'myop3'
input:'myop1' input:'myop2:1' input:'myop2:1'
""", op3.node_def)
def testDeviceObject(self):
op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(), [], [])
op._set_device("/job:goo/device:GPU:0")
self.assertProtoEquals(
"op:'noop' name:'myop' device:'/job:goo/device:GPU:0' ",
op.node_def)
op = ops.Operation(ops._NodeDef("noop", "op2"), ops.Graph(), [], [])
op._set_device(pydev.DeviceSpec(job="muu", device_type="CPU",
device_index=0))
self.assertProtoEquals(
"op:'noop' name:'op2' device:'/job:muu/device:CPU:0'",
op.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = ops.Operation(ops._NodeDef("noop", "op1"), g, [],
[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals("op:'noop' name:'op1'",
op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = ops.Operation(
ops._NodeDef("refop", "op2"), g, [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = ops.Operation(
ops._NodeDef("nonrefop", "op3"), g, [ref_t, nonref_t], [])
self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testInvalidNames(self):
g = ops.Graph()
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", ""), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "_invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "-invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "/invalid"), g)
def testShapeFunctionAbsence(self):
def _test():
pass
g = ops.Graph()
with self.assertRaises(RuntimeError):
g.create_op("shapeless_op", [], [dtypes.float32])
def testNoShapeFunction(self):
g = ops.Graph()
op = ops.Operation(ops._NodeDef("op", "an_op"), g,
output_types = [dtypes.float32])
self.assertEqual(tensor_shape.unknown_shape(),
_apply_op(g, "an_op", [], [dtypes.float32]).get_shape())
class CreateOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
op1 = g.create_op("const", [], [dtypes.float32], None, name="myop1")
with g.device("/device:GPU:0"):
op2 = g.create_op("add",
[],
[dtypes.float32, dtypes.string], None,
name="myop2")
op3 = g.create_op("foo",
[list(op1.values())[0], list(op2.values())[1],
list(op2.values())[0]],
[dtypes.float32, dtypes.int32],
None,
name="myop3")
self.assertDeviceEqual(None, op1.device)
self.assertDeviceEqual("/device:GPU:0", op2.device)
self.assertDeviceEqual(None, op3.device)
self.assertProtoEquals("name:'myop1' op:'const'", op1.node_def)
self.assertProtoEquals("name:'myop2' op:'add' device:'/device:GPU:0'",
op2.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'foo'",
op3.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = g.create_op("noop", [],
[dtypes.float32_ref, dtypes.float32], name="op1")
self.assertProtoEquals("op:'noop' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = g.create_op("refop", [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = g.create_op("nonrefop", [ref_t, nonref_t], [], name="op3")
self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testFinalized(self):
g = ops.Graph()
g.finalize()
with self.assertRaises(RuntimeError):
g.create_op("const", [], [dtypes.float32], None, name="myop1")
class ApplyOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
t1 = _apply_op(g, "const", [], [dtypes.float32], name="myop1")
with g.device("/device:GPU:0"):
t2 = _apply_op(g, "add",
[],
[dtypes.float32, dtypes.string],
name="myop2")
t3 = _apply_op(g, "foo", [t1, t2[1], t2[0]],
[dtypes.float32, dtypes.int32], name="myop3")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, list))
self.assertTrue(isinstance(t3, list))
self.assertTrue(isinstance(t3[0], ops.Tensor))
self.assertEqual("myop1", t1._as_node_def_input())
self.assertEqual("myop2", t2[0]._as_node_def_input())
self.assertEqual("myop2:1", t2[1]._as_node_def_input())
self.assertEqual("myop3", t3[0]._as_node_def_input())
# Validate that we got the right ops as well
self.assertProtoEquals("name:'myop1' op:'const'", t1.op.node_def)
self.assertProtoEquals("name:'myop2' op:'add' device:'/device:GPU:0'",
t2[0].op.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'foo'",
t3[0].op.node_def)
def testReferenceInput(self):
g = ops.Graph()
ref_t, nonref_t = _apply_op(
g, "noop", [], [dtypes.float32_ref, dtypes.float32], name="op1")
self.assertProtoEquals("op:'noop' name:'op1'", ref_t.op.node_def)
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
out_2 = _apply_op(g, "refop", [ref_t, nonref_t], [dtypes.int32],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
out_2.op.node_def)
out_3 = _apply_op(g, "nonrefop", [ref_t, nonref_t], [dtypes.int32],
name="op3")
self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
def testBasics(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_2", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_2", g.unique_name("foo"))
self.assertEqual("foo_1_1", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_1", g.unique_name("foo_1"))
self.assertEqual("foo_1_2", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_2", g.unique_name("foo_1"))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2", mark_as_used=False))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo", g.unique_name("foo"))
self.assertEqual("bar/foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo_1", g.unique_name("foo"))
with g.name_scope(None):
self.assertEqual("foo_3", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_3", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual("bar/baz/foo", g.unique_name("foo",
mark_as_used=False))
self.assertEqual("bar/baz/foo", g.unique_name("foo"))
self.assertEqual("bar/baz/foo_1", g.unique_name("foo",
mark_as_used=False))
self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual("bar/baz_1/foo", g.unique_name("foo",
mark_as_used=False))
self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo",
mark_as_used=False))
self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
with g.name_scope("quux"):
self.assertEqual("quux/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("quux/foo", g.unique_name("foo"))
with g.name_scope("bar"):
with g.name_scope("baz"):
self.assertEqual("bar_1/baz/foo", g.unique_name("foo",
mark_as_used=False))
self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
self.assertEqual("foo_4", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_4", g.unique_name("foo"))
self.assertEqual("bar_2", g.unique_name("bar", mark_as_used=False))
self.assertEqual("bar_2", g.unique_name("bar"))
def testNameAndVariableScope(self):
with self.test_session() as sess:
with sess.graph.name_scope("l0"):
with variable_scope.variable_scope("l1"):
with sess.graph.name_scope("l1") as scope:
self.assertEqual("l0/l1/l1/", scope)
self.assertEqual("l0/l1/l1/foo",
sess.graph.unique_name("foo", mark_as_used=False))
self.assertEqual("l0/l1/l1/foo", sess.graph.unique_name("foo"))
with sess.graph.name_scope("l2") as scope:
self.assertEqual("l0/l1/l2/", scope)
self.assertEqual("l0/l1/l2/foo",
sess.graph.unique_name("foo", mark_as_used=False))
self.assertEqual("l0/l1/l2/foo", sess.graph.unique_name("foo"))
def testOutOfOrderUniqueName(self):
g = ops.Graph()
self.assertEqual("foo_2", g.unique_name("foo_2"))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_3", g.unique_name("foo"))
class NameTest(test_util.TensorFlowTestCase):
def testGenerateName(self):
g = ops.Graph()
op0 = g.create_op("const", [], [dtypes.float32, dtypes.float32])
self.assertEqual("const", op0.name)
self.assertEqual("const:0", op0.outputs[0].name)
self.assertEqual("const:1", op0.outputs[1].name)
op1 = g.create_op("const", [], [dtypes.float32])
self.assertEqual("const_1", op1.name)
self.assertEqual("const_1:0", op1.outputs[0].name)
op2 = g.create_op("const", [], [dtypes.float32], name="my_op")
self.assertEqual("my_op", op2.name)
self.assertEqual("my_op:0", op2.outputs[0].name)
def testNameScope(self):
g = ops.Graph()
with g.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with g.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with g.name_scope(None) as empty1:
self.assertEqual("", empty1)
with g.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with g.name_scope("") as empty2:
self.assertEqual("", empty2)
self.assertEqual("const",
g.create_op("const", [], [dtypes.float32]).name)
with g.name_scope("bar") as scope:
self.assertEqual("bar/const",
g.create_op("const", [], [dtypes.float32]).name)
self.assertEqual("bar/const_1",
g.create_op("const", [], [dtypes.float32]).name)
# If you use the value from "with .. as", that values is used as-is.
self.assertEqual(
"bar",
g.create_op("const", [], [dtypes.float32], name=scope).name)
with g.name_scope("baz") as scope:
with g.name_scope("quux"):
self.assertEqual("baz/quux/const",
g.create_op("const", [], [dtypes.float32]).name)
# If you use the value from the enclosing "with .. as", nothing is pushed.
with g.name_scope(scope):
self.assertEqual("baz/const",
g.create_op("const", [], [dtypes.float32]).name)
self.assertEqual("baz",
g.create_op("const", [], [dtypes.float32],
name=scope).name)
self.assertEqual("trailing",
g.create_op("const", [], [dtypes.float32],
name="trailing/").name)
with g.name_scope("bar"):
self.assertEqual("bar_1/const",
g.create_op("const", [], [dtypes.float32]).name)
with g.name_scope("bar/"):
self.assertEqual("bar/const_2",
g.create_op("const", [], [dtypes.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
def testNoDevice(self):
g = ops.Graph()
op = g.create_op("an_op", [], [dtypes.float32])
self.assertDeviceEqual(None, op.device)
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op" }
""", gd)
def testDevicePartialString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op" device: "/job:worker/replica:2" }
""", gd)
def testDeviceFull(self):
g = ops.Graph()
with g.device(pydev.DeviceSpec(job="worker", replica=2, task=0,
device_type="CPU", device_index=3)):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/task:0/device:CPU:3" }
""", gd)
def testNesting(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("an_op", [], [dtypes.float32])
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/replica:3/task:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2" }
""", gd)
def testNestingString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("an_op", [], [dtypes.float32])
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/replica:3/task:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2" }
""", gd)
def testNestingOverrideGpuCpu(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:worker/replica:2/device:GPU:2"):
g.create_op("an_op", [], [dtypes.float32])
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/replica:2/device:GPU:2" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNestingWithMergeDeviceFunction(self):
g = ops.Graph()
with g.device(pydev.merge_device("/device:GPU:0")):
g.create_op("an_op", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:worker")):
g.create_op("an_op", [], [dtypes.float32])
with g.device(pydev.merge_device("/device:CPU:0")):
g.create_op("an_op", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")):
g.create_op("an_op", [], [dtypes.float32])
with g.device(pydev.merge_device(None)):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/device:GPU:0" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/device:GPU:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/device:CPU:0" }
node { name: "an_op_3" op: "an_op"
device: "/job:ps/device:CPU:0" }
node { name: "an_op_4" op: "an_op"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStrings(self):
g = ops.Graph()
with g.device("/device:GPU:0"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:worker"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/device:CPU:0"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:ps"):
g.create_op("an_op", [], [dtypes.float32])
with g.device(""):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/device:GPU:0" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/device:GPU:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/device:CPU:0" }
node { name: "an_op_3" op: "an_op"
device: "/job:ps/device:CPU:0" }
node { name: "an_op_4" op: "an_op"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStringWildcard(self):
g = ops.Graph()
with g.device("/device:GPU:7"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/device:GPU:*"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/device:CPU:*"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/device:CPU:5"):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/device:GPU:7" }
node { name: "an_op_1" op: "an_op"
device: "/device:GPU:7" }
node { name: "an_op_2" op: "an_op"
device: "/device:CPU:*" }
node { name: "an_op_3" op: "an_op"
device: "/device:CPU:5" }
""", gd)
def testNoneClearsDefault(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("an_op", [], [dtypes.float32])
with g.device(None):
g.create_op("an_op", [], [dtypes.float32])
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "an_op_1" op: "an_op" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNoneIgnoresOuterDeviceFunction(self):
g = ops.Graph()
with g.device(lambda op: "/job:worker/replica:2/device:CPU:1"):
g.create_op("an_op", [], [dtypes.float32])
with g.device(None):
g.create_op("an_op", [], [dtypes.float32])
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "an_op_1" op: "an_op" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def _overwritingDeviceFunction(self, unused_op):
# This device function unconditionally overwrites the device of ops.
#
# NOTE(mrry): Writing device functions like this is not
# recommended. Instead, in most cases you should use
# `pydev.merge_device("/job:ps")` or simply `"/job:ps"` as the
# argument to `tf.device()` and the device component will be merged in.
return "/job:overwrite"
def testOverwritingBehavior(self):
g = ops.Graph()
with g.device(self._overwritingDeviceFunction):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:ps"): # Will be overwritten.
g.create_op("an_op", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")): # Will be overwritten.
g.create_op("an_op", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device("/job:ps"):
g.create_op("an_op", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device(pydev.merge_device("/job:ps")):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:overwrite" }
node { name: "an_op_1" op: "an_op"
device: "/job:overwrite" }
node { name: "an_op_2" op: "an_op"
device: "/job:overwrite" }
node { name: "an_op_3" op: "an_op"
device: "/job:ps" }
node { name: "an_op_4" op: "an_op"
device: "/job:ps" }
""", gd)
class ObjectWithName(object):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
class CollectionTest(test_util.TensorFlowTestCase):
def test_add_to_collection(self):
g = ops.Graph()
g.add_to_collection("key", 12)
g.add_to_collection("other", "foo")
g.add_to_collection("key", 34)
# Note that only blank1 is returned.
g.add_to_collection("blah", 27)
blank1 = ObjectWithName("prefix/foo")
g.add_to_collection("blah", blank1)
blank2 = ObjectWithName("junk/foo")
g.add_to_collection("blah", blank2)
self.assertEqual([12, 34], g.get_collection("key"))
self.assertEqual([], g.get_collection("nothing"))
self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
self.assertEqual([blank1], g.get_collection("blah", "prefix"))
self.assertEqual([blank1], g.get_collection("blah", ".*x"))
# Make sure that get_collection() returns a first-level
# copy of the collection, while get_collection_ref() returns
# the original list.
other_collection_snapshot = g.get_collection("other")
other_collection_ref = g.get_collection_ref("other")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo"], other_collection_ref)
g.add_to_collection("other", "bar")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo", "bar"], other_collection_ref)
self.assertEqual(["foo", "bar"], g.get_collection("other"))
self.assertTrue(other_collection_ref is g.get_collection_ref("other"))
# Verify that getting an empty collection ref returns a modifiable list.
empty_coll_ref = g.get_collection_ref("empty")
self.assertEqual([], empty_coll_ref)
empty_coll = g.get_collection("empty")
self.assertEqual([], empty_coll)
self.assertFalse(empty_coll is empty_coll_ref)
empty_coll_ref2 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref2 is empty_coll_ref)
# Add to the collection.
empty_coll_ref.append("something")
self.assertEqual(["something"], empty_coll_ref)
self.assertEqual(["something"], empty_coll_ref2)
self.assertEqual([], empty_coll)
self.assertEqual(["something"], g.get_collection("empty"))
empty_coll_ref3 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref3 is empty_coll_ref)
def test_add_to_collections_uniquify(self):
g = ops.Graph()
g.add_to_collections([1, 2, 1], "key")
# Make sure "key" is not added twice
self.assertEqual(["key"], g.get_collection(1))
def test_add_to_collections_from_list(self):
g = ops.Graph()
g.add_to_collections(["abc", "123"], "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_tuple(self):
g = ops.Graph()
g.add_to_collections(("abc", "123"), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_generator(self):
g = ops.Graph()
def generator():
yield "abc"
yield "123"
g.add_to_collections(generator(), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_set(self):
g = ops.Graph()
g.add_to_collections(set(["abc", "123"]), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_string(self):
g = ops.Graph()
g.add_to_collections("abc", "key")
self.assertEqual(["key"], g.get_collection("abc"))
def test_default_graph(self):
with ops.Graph().as_default():
ops.add_to_collection("key", 90)
ops.add_to_collection("key", 100)
# Collections are ordered.
self.assertEqual([90, 100], ops.get_collection("key"))
def an_op(g):
return _apply_op(g, "an_op", [], [dtypes.float32])
ops.NoGradient("an_op")
def copy_op(x):
return _apply_op(x.graph, "copy", [x], [x.dtype])
@ops.RegisterGradient("copy")
def _CopyGrad(op, x_grad):
_ = op
return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad):
_ = op
return x_grad
class RegistrationTest(test_util.TensorFlowTestCase):
def testRegisterGradients(self):
g = ops.Graph()
x = an_op(g)
y = copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyGrad, fn)
def testOverrideGradients(self):
g = ops.Graph()
x = an_op(g)
with g.gradient_override_map({"copy": "copy_override"}):
y = copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyOverrideGrad, fn)
def testNonExistentOverride(self):
g = ops.Graph()
x = an_op(g)
with g.gradient_override_map({"copy": "unknown_override"}):
y = copy_op(x)
with self.assertRaisesRegexp(LookupError, "unknown_override"):
fn = ops.get_gradient_function(y.op)
class ComparisonTest(test_util.TensorFlowTestCase):
def testMembershipAllowed(self):
g = ops.Graph()
t1 = _apply_op(g, "const", [], [dtypes.float32], name="myop1")
t2 = _apply_op(g, "const", [], [dtypes.float32], name="myop2")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, ops.Tensor))
self.assertTrue(t1 in [t1])
self.assertTrue(t1 not in [t2])
class ControlDependenciesTest(test_util.TensorFlowTestCase):
def testBasic(self):
g = ops.Graph()
a = _apply_op(g, "const", [], [dtypes.float32])
b = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a]):
c = _apply_op(g, "const", [], [dtypes.float32])
d = _apply_op(g, "identity", [b], [dtypes.float32])
e = _apply_op(g, "identity", [c], [dtypes.float32])
self.assertEqual(c.op.control_inputs, [a.op])
self.assertEqual(d.op.control_inputs, [a.op])
# e should be dominated by c.
self.assertEqual(e.op.control_inputs, [])
def testBasicWithConversion(self):
g = ops.Graph()
a = _apply_op(g, "const", [], [dtypes.float32])
class ConvertibleObj(object):
def _as_graph_element(self):
return a
with g.control_dependencies([ConvertibleObj()]):
c = _apply_op(g, "const", [], [dtypes.float32])
self.assertEqual(c.op.control_inputs, [a.op])
def testNested(self):
g = ops.Graph()
a_1 = _apply_op(g, "const", [], [dtypes.float32])
a_2 = _apply_op(g, "const", [], [dtypes.float32])
a_3 = _apply_op(g, "const", [], [dtypes.float32])
a_4 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_1, a_2, a_3, a_4]):
b_1 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
b_2 = _apply_op(g, "const", [], [dtypes.float32])
self.assertItemsEqual(
[a_1.op, a_2.op, a_3.op, a_4.op], b_1.op.control_inputs)
self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)
def testClear(self):
g = ops.Graph()
a_1 = _apply_op(g, "const", [], [dtypes.float32])
a_2 = _apply_op(g, "const", [], [dtypes.float32])
a_3 = _apply_op(g, "const", [], [dtypes.float32])
a_4 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies(None):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "const", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "const", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "const", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "const", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies(None):
# deps are None again
b_none2 = _apply_op(g, "const", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testComplex(self):
g = ops.Graph()
# Usage pattern:
# * Nodes a_i are constants defined at the outermost scope, and are used
# as control inputs for the ith nested scope.
# * Nodes b_i are defined as Mul(a_3, a_4) at each scope.
# * Nodes c_i are defined as Mul(a_1, b_1) at each scope.
# * Nodes d_i are defined as Mul(b_i, c_i) at each scope.
# * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1.
a_1 = _apply_op(g, "const", [], [dtypes.float32])
a_2 = _apply_op(g, "const", [], [dtypes.float32])
a_3 = _apply_op(g, "const", [], [dtypes.float32])
a_4 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_1]):
b_1 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
c_1 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
d_1 = _apply_op(g, "mul", [b_1, c_1], [dtypes.float32])
e_1 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_2]):
b_2 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
c_2 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
d_2 = _apply_op(g, "mul", [b_2, c_2], [dtypes.float32])
e_2 = _apply_op(g, "mul", [e_1, e_1], [dtypes.float32])
with g.control_dependencies([a_3]):
b_3 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
c_3 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
d_3 = _apply_op(g, "mul", [b_3, c_3], [dtypes.float32])
e_3 = _apply_op(g, "mul", [e_2, e_2], [dtypes.float32])
with g.control_dependencies([a_4]):
b_4 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
c_4 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
d_4 = _apply_op(g, "mul", [b_4, c_4], [dtypes.float32])
e_4 = _apply_op(g, "mul", [e_3, e_3], [dtypes.float32])
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
self.assertItemsEqual([], c_1.op.control_inputs)
self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
self.assertItemsEqual([], d_1.op.control_inputs)
self.assertItemsEqual([], d_2.op.control_inputs)
self.assertItemsEqual([], d_3.op.control_inputs)
self.assertItemsEqual([], d_4.op.control_inputs)
self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
self.assertItemsEqual([a_4.op], e_4.op.control_inputs)
def testRepeatedDependency(self):
g = ops.Graph()
a = g.create_op("foo", [], [dtypes.float32, dtypes.float32])
a_0, a_1 = a.outputs
with g.control_dependencies([a_0]):
b = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_1]):
c = _apply_op(g, "const", [], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [a])
self.assertEqual(c.op.control_inputs, [a])
def testNoControlDependencyWithDataDependency(self):
g = ops.Graph()
a = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a]):
b = _apply_op(g, "identity", [a], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [])
class OpScopeTest(test_util.TensorFlowTestCase):
def testNoScopeName(self):
g0 = ops.Graph()
values = [
g0.create_op("a", [], [dtypes.float32]),
g0.create_op("b", [], [dtypes.float32])]
with self.assertRaises(ValueError):
with ops.op_scope(values, None):
pass
with self.assertRaises(ValueError):
with ops.op_scope(values, None, None):
pass
def testEmptyScopeName(self):
g0 = ops.Graph()
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
with ops.op_scope([a, b], "") as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.op_scope([a, b], "", "my_default_scope") as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
def testDefaultScopeName(self):
g0 = ops.Graph()
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
scope_name = "my_scope"
default_scope_name = "my_default_scope"
with ops.op_scope([a, b], scope_name, default_scope_name) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.op_scope([a, b], None, default_scope_name) as scope:
self.assertEqual("%s/" % default_scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
def _testGraphElements(self, graph_elements):
scope_name = "my_scope"
with ops.op_scope(graph_elements, scope_name) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(graph_elements[0].graph, ops.get_default_graph())
g1 = ops.Graph()
c = g1.create_op("c", [], [dtypes.float32])
with self.assertRaises(ValueError):
with ops.op_scope(graph_elements + [c], scope_name):
pass
def testTensor(self):
g0 = ops.Graph()
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
self._testGraphElements([a, b])
def testSparseTensor(self):
g0 = ops.Graph()
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
sparse = ops.SparseTensor(
_apply_op(g0, "const", [], [dtypes.int64]),
_apply_op(g0, "const", [], [dtypes.float32]),
_apply_op(g0, "const", [], [dtypes.int64]))
self._testGraphElements([a, sparse, b])
def testVariable(self):
g0 = ops.Graph()
with g0.as_default():
variable = variables.Variable([1.0])
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
self._testGraphElements([a, variable, b])
class GraphTest(test_util.TensorFlowTestCase):
def setUp(self):
ops.reset_default_graph()
def _AssertDefault(self, expected):
self.assertIs(expected, ops.get_default_graph())
def testGraphContextManager(self):
g0 = ops.Graph()
with g0.as_default() as g1:
self.assertIs(g0, g1)
def testDefaultGraph(self):
orig = ops.get_default_graph()
self._AssertDefault(orig)
g0 = ops.Graph()
self._AssertDefault(orig)
context_manager_0 = g0.as_default()
self._AssertDefault(orig)
with context_manager_0 as g0:
self._AssertDefault(g0)
with ops.Graph().as_default() as g1:
self._AssertDefault(g1)
self._AssertDefault(g0)
self._AssertDefault(orig)
def testAsGraphElementConversions(self):
class ConvertibleObj(object):
def _as_graph_element(self):
return "const:0"
class NonConvertibleObj(object):
pass
g = ops.Graph()
a = _apply_op(g, "const", [], [dtypes.float32])
self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
with self.assertRaises(TypeError):
g.as_graph_element(NonConvertibleObj())
ops.RegisterShape("KernelLabel")(common_shapes.scalar_shape)
class KernelLabelTest(test_util.TensorFlowTestCase):
def testNoLabel(self):
with self.test_session():
self.assertAllEqual(b"My label is: default",
test_ops.kernel_label().eval())
def testLabelMap(self):
with self.test_session() as sess:
default_1 = test_ops.kernel_label()
# pylint: disable=protected-access
with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
overload_1_1 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
overload_2 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": ""}):
default_2 = test_ops.kernel_label()
overload_1_2 = test_ops.kernel_label()
# pylint: enable=protected-access
default_3 = test_ops.kernel_label()
self.assertAllEqual(b"My label is: default", default_1.eval())
self.assertAllEqual(b"My label is: default", default_2.eval())
self.assertAllEqual(b"My label is: default", default_3.eval())
self.assertAllEqual(b"My label is: overload_1", overload_1_1.eval())
self.assertAllEqual(b"My label is: overload_1", overload_1_2.eval())
self.assertAllEqual(b"My label is: overload_2", overload_2.eval())
class AsGraphDefTest(test_util.TensorFlowTestCase):
def testGraphDefVersion(self):
"""Test that the graphdef version is plumbed through to kernels."""
for version in range(versions.GRAPH_DEF_VERSION_MIN_PRODUCER,
versions.GRAPH_DEF_VERSION + 2):
with ops.Graph().as_default() as g:
g.graph_def_versions.producer = version
with self.test_session(graph=g):
v = test_ops.graph_def_version().eval()
self.assertEqual(version, v)
def testAddShapes(self):
with ops.Graph().as_default() as g:
t1, t2, t3, t4, t5 = _apply_op(g, "an_op", [], [dtypes.float32] * 5)
t1.set_shape(None)
t2.set_shape([])
t3.set_shape([None])
t4.set_shape([43, 37])
t5.set_shape([43, None])
gd = g.as_graph_def(add_shapes=True)
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
attr {
key: "_output_shapes"
value {
list {
shape { unknown_rank: true }
shape { }
shape { dim { size: -1 } }
shape { dim { size: 43 } dim { size: 37 } }
shape { dim { size: 43 } dim { size: -1 } }
}
}
}
}
""", gd)
# NOTE(petewarden): Dummy stats registrations for ops used in the tests.
@ops.RegisterStatistics("a", "weight_parameters")
def _calc_a_weight_params(unused_graph, unused_node):
return ops.OpStats("weight_parameters", 10)
@ops.RegisterStatistics("a", "flops")
def _calc_a_forward_flops(unused_graph, unused_node):
return ops.OpStats("flops", 20)
class StatisticsTest(test_util.TensorFlowTestCase):
def testRegisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("a", "an_a")
weight_params = ops.get_stats_for_node_def(graph, node, "weight_parameters")
self.assertEqual(10, weight_params.value)
flops = ops.get_stats_for_node_def(graph, node, "flops")
self.assertEqual(20, flops.value)
missing_stat = ops.get_stats_for_node_def(graph, node, "missing_stat")
self.assertEqual(None, missing_stat.value)
def testUnregisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("b", "a_b")
weight_params = ops.get_stats_for_node_def(graph, node, "weight_params")
self.assertEqual(None, weight_params.value)
def testAccumulateStatistics(self):
weight_params_total = ops.OpStats("weight_parameters")
self.assertEqual(None, weight_params_total.value)
flops_total = ops.OpStats("flops")
self.assertEqual(None, flops_total.value)
first_weight_params = ops.OpStats("weight_parameters", 100)
weight_params_total += first_weight_params
self.assertEqual(100, weight_params_total.value)
second_flops = ops.OpStats("flops", 3)
flops_total += second_flops
self.assertEqual(3, flops_total.value)
second_weight_params = ops.OpStats("weight_parameters", 200)
weight_params_total += second_weight_params
self.assertEqual(300, weight_params_total.value)
class ColocationGroupTest(test_util.TensorFlowTestCase):
def testBasic(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], a.op.colocation_groups())
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
with self.assertRaises(ValueError):
c.op.get_attr("_class")
def testColocationDeviceInteraction(self):
with ops.device("/cpu:0"):
with ops.device("/gpu:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# 'b' is created in the scope of /cpu:0, but it is
# colocated with 'a', which is on '/gpu:0'. colocate_with
# overrides devices because it is a stronger constraint.
b = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual(a.op.device, b.op.device)
def testLocationOverrides(self):
with ops.device("/cpu:0"):
with ops.device("/gpu:0"):
a = constant_op.constant([2.0], name="a")
# Note that this colocation is "redundant", since we are
# within the scope of "/gpu:0". However, we would like to
# preserve in the GraphDef that these two ops should be
# colocated in a portable way.
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
d = constant_op.constant(5.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual("/device:GPU:0", a.op.device)
self.assertEqual(a.op.device, b.op.device)
# Test that device function stack is restored.
self.assertEqual("/device:GPU:0", c.op.device)
self.assertEqual("/device:CPU:0", d.op.device)
def testNestedColocateWith(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
def testMultiColocationGroups(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))
def testColocationIgnoreStack(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op, ignore_existing=True):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
def testColocateVariables(self):
a = variables.Variable([2.0], name="a")
with ops.colocate_with(a.op):
b = variables.Variable([3.0], name="b")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
def testInconsistentDeviceWithinColocate(self):
with ops.device("/gpu:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# This is allowed due to legacy but clearly wrong, since we
# should really be colocating with 'a'. We allow devices to
# override colocate_with, but we log warnings to suggest that
# this is probably unintentional or misguided.
with ops.device("/cpu:0"):
b = constant_op.constant([3.0], name="b")
self.assertEqual("/device:CPU:0", b.device)
class DeprecatedTest(test_util.TensorFlowTestCase):
def testSuccess(self):
with ops.Graph().as_default() as g:
g.graph_def_versions.producer = 7
old = test_ops.old()
with self.test_session(graph=g):
old.run()
def _error(self):
return ((r"Op Old is not available in GraphDef version %d\. "
r"It has been removed in version 8\. For reasons\.") %
versions.GRAPH_DEF_VERSION)
def testGraphConstructionFail(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(NotImplementedError, self._error()):
test_ops.old()
def testGraphExecutionFail(self):
with ops.Graph().as_default() as g:
g.graph_def_versions.producer = 7
old = test_ops.old()
g.graph_def_versions.producer = versions.GRAPH_DEF_VERSION
with self.test_session(graph=g):
with self.assertRaisesRegexp(errors.UnimplementedError, self._error()):
old.run()
class DenseTensorLikeTypeTest(test_util.TensorFlowTestCase):
def testSuccess(self):
op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(),
[], [dtypes.float32])
t = op.outputs[0]
self.assertTrue(ops.is_dense_tensor_like(t))
v = variables.Variable([17])
self.assertTrue(ops.is_dense_tensor_like(v))
class BadClassNoName(object):
pass
class BadClassBadName(object):
def name(self):
pass
class BadClassNoDtype(object):
@property
def name(self):
pass
class BadClassBadDtype(object):
@property
def name(self):
pass
def dtype(self):
pass
def testBadClass(self):
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoName)
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadName)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoDtype)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadDtype)
if __name__ == "__main__":
googletest.main()
| [
"[email protected]"
] | |
d6e8f0d5efead89c44dddb8d6ccd4759b14870c7 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-2/6df5a87840c9f271ab86792449dc945cadc82f12-<get>-bug.py | 8034b8985edb4a0b14bb3eca635326fdb5537a2a | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,775 | py |
def get(self, request, organization, integration_id):
try:
integration = Integration.objects.get(organizations=organization, id=integration_id, provider='github')
except Integration.DoesNotExist:
return Response(status=404)
field = request.GET.get('field')
query = request.GET.get('query')
if (field is None):
return Response({
'detail': 'field is a required parameter',
}, status=400)
if (not query):
return Response({
'detail': 'query is a required parameter',
}, status=400)
installation = integration.get_installation(organization.id)
if (field == 'externalIssue'):
repo = request.GET.get('repo')
if (repo is None):
return Response({
'detail': 'repo is a required parameter',
}, status=400)
try:
response = installation.search_issues(query=('repo:%s %s' % (repo, query)).encode('utf-8'))
except Exception as e:
return self.handle_api_error(e)
return Response([{
'label': ('#%s %s' % (i['number'], i['title'])),
'value': i['number'],
} for i in response.get('items', [])])
if (field == 'repo'):
account_type = ('user' if (integration.metadata['account_type'] == 'User') else 'org')
full_query = ('%s:%s %s' % (account_type, integration.name, query)).encode('utf-8')
try:
response = installation.get_client().search_repositories(full_query)
except Exception as e:
return self.handle_api_error(e)
return Response([{
'label': i['name'],
'value': i['full_name'],
} for i in response.get('items', [])])
return Response(status=400)
| [
"[email protected]"
] | |
67ec96965335573a4257c08e53f071a3e550aafc | 3a38e0ae9eef4a00ff7206c85c0366be141c73b3 | /edu_smart_server/threads/migrations/0001_initial.py | d038c835c43b4a1311266371e25cd64daf0efeb6 | [] | no_license | CodeNicely/edusmart_final | a32dac680b6343f4822624394b06db334e51ff61 | 64b2d14abbbf94e396f3d11529de437cf7561cad | refs/heads/master | 2021-01-09T06:06:14.422656 | 2017-02-05T08:29:18 | 2017-02-05T08:29:18 | 80,914,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-05 00:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('classes', '0001_initial'),
('department', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='message_data',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author_id', models.CharField(max_length=120, null=True)),
('author_name', models.CharField(max_length=120, null=True)),
('message', models.CharField(max_length=120)),
('teacher', models.BooleanField(default=False)),
('modified', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='thread_data',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('access_level', models.IntegerField()),
('title', models.CharField(max_length=120, null=True)),
('description', models.CharField(max_length=120, null=True)),
('author', models.CharField(max_length=120, null=True)),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('class_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='classes.class_data')),
('department', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='department.department_data')),
],
),
migrations.AddField(
model_name='message_data',
name='thread_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='threads.thread_data'),
),
]
| [
"[email protected]"
] | |
15f39983e23350dc4c36b142edf378d99df662ba | d5820207ff265362743a1a2d833fdaf5187cc308 | /src/cern/jpymad/tools_optics.py | eee53b56036711ab319657afd127c960e90e24fb | [
"Apache-2.0"
] | permissive | pymad/jpymad | 54925b14e2fa34b07b17999d3ca05934f4b2309b | 6372ada76de400ed949ff5161a30699a7adedaba | refs/heads/master | 2021-01-10T20:47:01.991269 | 2014-06-28T14:08:57 | 2014-06-28T14:08:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | #-------------------------------------------------------------------------------
# This file is part of PyMad.
#
# Copyright (c) 2011, CERN. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 16 20:20:15 2010
@author: kaifox
"""
from __future__ import absolute_import
from .conversions import tofl, tostr
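# NOTE: get_values() below references 'pms', the JPyMad service module that
# exposes the MadX enums through the Java gateway. Its import is not shown in
# this file, so the name is assumed to be bound elsewhere at runtime.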
def get_values(optic, madxvarname):
"""
extract the values for the given madx-variable from the optcs object
PARAMETERS:
===========
optic: the object from which to extract the values
madxvarname: the name of the madx-variable for which to extract the values
"""
madxvar = pms.enums.MadxTwissVariable.fromMadxName(madxvarname) #@UndefinedVariable
values = optic.getAllValues(madxvar)
return tofl(values)
def get_names(optic):
'''
extracts the element names from the optics
'''
return tostr(optic.getNames())
| [
"[email protected]"
] | |
9518dbd5c0b43537d56acbc0c6d1d96bd5c035b6 | d79c978cc60afc6ffae1c7fc7730ed4d1e2eb77a | /app.py | 7cf24266880151663a0307c165dd5c5bd3506047 | [] | no_license | qhuydtvt/tk-vids | 9ea5af7ea81d735c9baf5830a493ce879ac6d735 | 71225cea0bf67bf4b843b4fb6aa1641bfc9ad10b | refs/heads/master | 2021-09-13T05:48:24.842104 | 2018-04-25T16:41:12 | 2018-04-25T16:41:12 | 105,641,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | from flask import Flask, render_template, request
from flask_cors import CORS
from urllib.request import urlopen
import json
import mlab
from models.audio import Audio
from flask_restful import Resource, Api
app = Flask(__name__)
cors = CORS(app, resources={r'/api/*': {"origins": "*"}})
api = Api(app)
mlab.connect()
class ApiAudio(Resource):
def get(self):
search_terms = request.args["search_terms"].lower().strip()
audio = Audio.objects(search_terms=search_terms).first()
if audio is not None:
return {
'success': 1,
'data': mlab.item2json(audio)
}
else:
return {
'success': 0,
'message': 'Not found'
}
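# Example request against ApiAudio (assuming the defaults used below, i.e. the
# app served on localhost:1212):
#   GET http://localhost:1212/api/audio?search_terms=thunder+imagine+dragons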
def webpage_str(url):
    return urlopen(url).read().decode('utf-8')
not_found_message = json.dumps({
    "success": 0,
"data": "not_found"
})
@app.route('/')
def index():
guide_list = [
{
"title": "Pure audio search",
"example": "api/audio?search_terms=thunder+imagine+dragons",
"format": "api/audio?search_terms=<Enter song|artist here>",
"parse_xml": "http://bit.ly/tk-xml-parser"
}
]
return render_template("index.html", guide_list=guide_list)
api.add_resource(ApiAudio, '/api/audio')
if __name__ == '__main__':
app.run(port=1212)
| [
"[email protected]"
] | |
17b962c6c281ee6f291f1520e6448f9c088dd12e | 3d7dece5254e42059e8a2cb1e72b295460284983 | /components/py_engine/micropython-lib/micropython/umqtt.robust/setup.py | f0f23ed8cc7dc9a25d7aae0acd27d4cfe92f24ab | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"Python-2.0"
] | permissive | windowxia/AliOS-Things | 172639d6e0d2b2e2e816bce757cf95e89187c132 | a99f20706f9c666903a12a205edce13263b1fadb | refs/heads/master | 2023-09-01T06:03:57.853390 | 2023-07-04T05:51:52 | 2023-07-04T06:49:36 | 149,751,180 | 0 | 0 | Apache-2.0 | 2018-09-21T10:56:09 | 2018-09-21T10:56:08 | null | UTF-8 | Python | false | false | 719 | py | import sys
# Remove current dir from sys.path, otherwise setuptools will peek up our
# module instead of system's.
sys.path.pop(0)
from setuptools import setup
sys.path.append("..")
import sdist_upip
setup(
name="micropython-umqtt.robust",
version="1.0.1",
description='Lightweight MQTT client for MicroPython ("robust" version).',
long_description=open("README.rst").read(),
url="https://github.com/micropython/micropython-lib",
author="Paul Sokolovsky",
author_email="[email protected]",
maintainer="micropython-lib Developers",
maintainer_email="[email protected]",
license="MIT",
cmdclass={"sdist": sdist_upip.sdist},
packages=["umqtt"],
)
| [
"[email protected]"
] | |
6e99309c21dc75f92af4fbcfeb21e488e84a0537 | 228fd55571b31cdcf54ef42a7338ca42ab399588 | /battery_client.py | 0e885f3c27579ae3896668884b5f49661a95bf00 | [] | no_license | benthomasson/ev3-play | 12aae6f846be11ad3fe029148eb084edccd7b991 | d416a0fd2eebc1a13bef601ccfe98bcf0c17ef61 | refs/heads/master | 2022-09-04T00:37:10.463877 | 2022-08-22T12:50:35 | 2022-08-22T12:50:35 | 253,309,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Echo client program
import socket
import json
import psutil
import time
def send_message(s, msg_type, message_data):
    # Frame each message as a 4-hex-digit length header followed by the
    # JSON-encoded [msg_type, message_data] payload.
    message = json.dumps([msg_type, message_data])
    length = len(message)
    s.sendall(f'{length:04x}'.encode())
    s.sendall(message.encode())
HOST = 'localhost'
PORT = 50007 # The same port as used by the server
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
send_message(s, 'Message', dict(msg='Hello world'))
while True:
battery = psutil.sensors_battery()
send_message(s, 'Battery', dict(percent=battery.percent))
time.sleep(1)
| [
"[email protected]"
] | |
8059eea3c4fe11d53b16161e256869544a1f7b8a | eafabc5e332f5fc0153e166d992ac0711cf90cd6 | /BOJ/2644/2644번(촌수계산).py | 3753969c178fbd1055026ee542090ce391404eee | [] | no_license | PARKINHYO/Algorithm | 96038ce21bd9f66208af0886208ef6ed925c23e2 | 0ed8687fe971fc2b05e2f50f62c0d0e47c368a6c | refs/heads/master | 2021-12-23T23:48:25.247979 | 2021-08-20T01:52:50 | 2021-08-20T01:52:50 | 196,219,508 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | from collections import defaultdict
from sys import stdin
class Graph():
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, u, v):
self.graph[u].append(v)
self.graph[v].append(u)
def BFS(self, s, v):
        visited = [False] * 105  # at most 100 people in this problem, so a fixed size suffices
queue = []
result = [0]* 105
queue.append(s)
visited[s] = True
while queue:
s = queue.pop(0)
for i in self.graph[s]:
if visited[i] == False:
queue.append(i)
visited[i] = True
result[i] = result[s] + 1
if result[v] == 0:
print(-1)
else:
print(result[v])
if __name__ == '__main__':
g = Graph()
# f = open('input.txt', 'r')
# file_txt = []
# for line in f:
#
# file_txt.append(line[:-1])
# f.close()
n = int(stdin.readline())
a, b = map(int, stdin.readline().split(" "))
m = int(stdin.readline())
for i in range(m):
tmp1, tmp2 = map(int, stdin.readline().split(" "))
g.addEdge(tmp1, tmp2)
g.BFS(a, b)
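    # Input format (BOJ 2644): n people (n <= 100), then the queried pair
    # (a, b), then m and m parent-child relations. The BFS distance from a to
    # b is the degree of kinship; -1 is printed when the two are unrelated.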
| [
"[email protected]"
] | |
a8abd28e470a50db39911b213d6efe1374a7962b | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/fbs_1148+444/sdB_FBS_1148+444_coadd.py | 0254387ab7054d564d646cf529283b7c20657047 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[177.84925,44.212194], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_FBS_1148+444/sdB_FBS_1148+444_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_FBS_1148+444/sdB_FBS_1148+444_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
36055e4e753fdcc3fdb4cb14e6b24e2dc38ddaa7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2535/48090/315890.py | c1bf75e0cf13230b181db8b8e4608c8c69acc14b | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | arr=eval(input())
class Solution:
def maxBlockToSorted(self, arr):
res, max_val = 0, arr[0]
for i, num in enumerate(arr):
if num > max_val:
max_val = num
if max_val == i:
res += 1
return res
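# Why this works (note): when arr is a permutation of 0..n-1, a sorted chunk
# can end at index i exactly when max(arr[0..i]) == i, so counting those cut
# points counts the blocks. e.g. [1, 0, 2, 3, 4] -> 4.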
c=Solution()
print(c.maxBlockToSorted(arr)) | [
"[email protected]"
] | |
747ecb88556de97d63232ef35d142e151af9ca44 | 50aa9303450e06d1172f78c0478a58e5113d9bb9 | /988 arranging-coions.py | 69f2c7dd8cf72e28b804be49986218f4e6b11bce | [] | no_license | zlldt/LintCode | 6e1041b78a301651378833caf7fd7db9ce112ec5 | e5012161131a8c8557bdb0296980b2a0b712c620 | refs/heads/master | 2021-06-27T05:24:08.471072 | 2019-03-02T12:56:26 | 2019-03-02T12:56:26 | 105,424,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | class Solution:
"""
@param n: a non-negative integer
@return: the total number of full staircase rows that can be formed
"""
def arrangeCoins(self, n):
        # Find the largest k such that 1 + 2 + ... + k <= n.
        start = 1
        total = 1
        if n == 1:
            return 1
        while total < n:
            start += 1
            total = start * (1 + start) // 2
        # If we landed exactly on n, row `start` itself is complete.
        return start if total == n else start - 1
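        # Closed-form alternative (note): k = floor((sqrt(8*n + 1) - 1) / 2),
        # obtained by solving k*(k+1)/2 <= n for k.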
| [
"[email protected]"
] | |
88ffbba45ab17a0c77184fb4c8d033d5dbe545b5 | a22f0ae4b4674f29449cc7f5aa9bd335e06c12eb | /MPDA_cfg_txt.py | 9403f2b34016a9386b4fbcf3bc4b808488df17d7 | [] | no_license | Tesla2fox/MPDA_OPERATION | e2a4a0a49e7d8a2bacaca8191cb9a5b58e74b06a | c33784a28f49b8e7333b74b86191d958e66ff021 | refs/heads/master | 2020-03-13T11:34:11.117054 | 2018-04-26T05:15:51 | 2018-04-26T05:15:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,810 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 30 09:01:23 2018
@author: robot
"""
#import classDefine
import classDefine as cd
import plotly.plotly as py
import plotly.graph_objs as go
import math
import plotly
import copy
import random
import time
import datetime
import subprocess
#__import__ ('classDefine')
#py.sign_in('tesla_fox', 'HOTRQ3nIOdYUUszDIfgN')
#RobInfoMat=[1 2 1 0.2 3 1 1 0.3 5 2 1 0.4]
#TaskInfoMat= [7 8 0.15 5;5 9 0.25 6;10 12 0.12 4];
for runTimes in range(0,100):
robotNum = 50
taskNum = 50
fileDir = 'D://VScode//MPDA_orgDecode//data//'
f = open(fileDir+'MPDA_cfg.txt','w')
#write time
f.write('time ' +datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+'\n')
f.write('AgentNum ' + str(robotNum)+'\n')
f.write('TaskNum '+ str(taskNum)+'\n')
robotList = []
taskPntList = []
#increaseRatioList = []
#initStateList = []
drawData = []
#create the taskPnt
for i in range(0,taskNum):
taskpnt = cd.TaskPnt()
taskpnt.random()
# taskpnt.displayTaskPnt()
taskPntList.append(copy.deepcopy(taskpnt))
# increaseRatioList.append(taskpnt.increaseRatio)
# initStateList.append(taskpnt.initState)
taskPntTrace = taskpnt.getTrace()
drawData.append(copy.deepcopy(taskPntTrace))
# print('<<<<<<<<<<<<<<<<')
#write txt
f.write('TaskIncreaseRatio ')
for j in range(0,taskNum):
f.write(' ')
f.write(str(taskPntList[j].increaseRatio))
f.write('\n')
f.write('TaskInitState ')
for j in range (0,taskNum):
f.write(' ')
f.write(str(taskPntList[j].initState))
f.write('\n')
#end write txt
#create the robots
f.write('AgentAbility ')
for i in range(0,robotNum):
rob = cd.Robot()
rob.random()
# rob.displayRobot()
robotList.append(copy.deepcopy(rob))
f.write(' '+str(rob.ability))
f.write('\n')
#end create
f.write('TaskDisMat ')
for i in range (0,taskNum):
for j in range(i+1,taskNum):
dis = cd.distance_point(taskPntList[i].pnt,taskPntList[j].pnt)
f.write(' '+str(dis))
f.write('\n')
f.write('Ag2TaskDisMat ')
for i in range (0,robotNum):
for j in range (0,taskNum):
dis = cd.distance_point(robotList[i].pnt,taskPntList[j].pnt)
f.write(' ' +str(dis))
f.write('\n')
f.write('Encode ')
for i in range(0,robotNum):
permutationList =[]
for j in range (0,taskNum):
permutationList.append(j)
random.shuffle(permutationList)
for j in range (0,taskNum):
f.write(' '+ str(permutationList[j]))
# print(disOrderPermutation)
f.write('\n')
    # write extra info that the C++ program doesn't need to read
f.write('robotPosstion\n')
f.write('<<<<<<<<<\n')
for i in range(0,robotNum):
f.write('index '+ str(i))
f.write(' pntx = ' +str(robotList[i].pnt.x) + ' pnty = ' +
str(robotList[i].pnt.y) + '\n')
f.write('taskPosition\n')
f.write('<<<<<<<<<\n')
for i in range (0,taskNum):
f.write('index '+ str(i))
f.write(' pntx = ' +str(taskPntList[i].pnt.x) + ' pnty = ' +
str(taskPntList[i].pnt.y) + '\n')
# end writing the data to the configure txt
f.close()
pname = "D:\\VScode\\MPDA_StaticConstrn\\bin\\mpda_StaticConstrn\\Debug\\mpda_StaticConstrn.exe"
p = subprocess.Popen(pname,stdin =subprocess.PIPE,stdout = subprocess.PIPE)
o = p.communicate()
print(runTimes,'<<<<<<<<<<<<<<<<')
| [
"[email protected]"
] | |
6376219e886f308f530668c9f2fa4f1282fb10d8 | f8d7a3ef65a445a13195f4f2c80a232d40edc271 | /fileupload/urls.py | 84b8774454c63e8adab2006dbe5167c240e69ba0 | [
"Apache-2.0"
] | permissive | neha-webllisto/file_upload | c954cc00da916364e969d68a6ef6e5b4fe898d3e | 877e9da88677e9ad73f3b2f85dcfab7dbdf49e45 | refs/heads/master | 2020-04-14T16:22:16.825108 | 2019-01-03T09:19:12 | 2019-01-03T09:19:12 | 163,950,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | """fileupload URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('upload.urls'))
]
| [
"[email protected]"
] | |
c2613d4de725de56bbbf01deaaf1c35c04082806 | 36afa271f080459adf1014cd23f4be9f954dfee6 | /Crawler/Course/第八章:scrapy框架/fbsPro/fbsPro/middlewares.py | 8bdbfa2bca493aa2de753b574c117a9d86a9738a | [] | no_license | King-Of-Game/Python | b69186a7574ce1c0b7097207cfe9a2eb38a90bc0 | 643b9fd22efd78f6679735f23432943a57b5f5bb | refs/heads/master | 2023-05-25T05:35:14.473114 | 2021-10-24T12:52:21 | 2021-10-24T12:52:21 | 151,251,434 | 3 | 0 | null | 2023-05-01T20:51:50 | 2018-10-02T12:34:04 | HTML | UTF-8 | Python | false | false | 3,597 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class FbsproSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class FbsproDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
8d02b656752897966fe1cd8292a3a66a92ed3bff | 2447effa4ea6ae298c826967cc572583f6bde5d9 | /contacts/models.py | ecf15cf1cb0e166529c5044bf20154da3f8f2e84 | [] | no_license | lydia-karungi/Ruforum | fc317d74092c9d77903fd5a96b46c296fee9b2a7 | d1d6714fbac47a357bc51f0867f577b98e1bbf62 | refs/heads/main | 2023-02-06T04:10:51.032368 | 2020-12-23T09:11:33 | 2020-12-23T09:11:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,422 | py | from django.db import models
from django.contrib import auth
from django.contrib.auth.models import Group, Permission
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser
)
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from common.choices import COUNTRY_CHOICES, GENDER_CHOICES
from phonenumber_field.modelfields import PhoneNumberField
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import datetime
from grant_types.models import Granttype
API_KEY = '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'
class Student(models.Model):
DEGREE_PROGRAM_LEVELS = (
('msc', 'Msc'),
('phd', 'PhD'),
('mphil', 'MPhil'),
('bachelor', 'Bachelor'),
)
STUDENT_TYPE = (
('other', 'OTHER'),
('regional_programs', 'Regional Programs'),
('competitive_grants', 'Competitive Grants'),
)
YEAR_CHOICES = (
(year, year) for year in range(datetime.date.today().year, 1990 - 1, -1)
)
user = models.OneToOneField('User', models.DO_NOTHING)
student_no = models.CharField(max_length=64, unique=True, null=True, editable=False)
year_of_birth = models.CharField(max_length=4)
university = models.CharField(max_length=100)
university_department = models.CharField(max_length=64)
university_reg_no = models.CharField(max_length=20)
degree_program_level = models.CharField(max_length=8, choices=DEGREE_PROGRAM_LEVELS)
degree_program_name = models.CharField(max_length=64)
intake_year = models.CharField(max_length=4)
grad_expected = models.DateField(blank=True, null=True)
grad_actual = models.DateField(blank=True, null=True)
thesis_title = models.CharField(max_length=256)
cohort = models.IntegerField(blank=True, null=True)
supervisor1 = models.CharField(max_length=128)
supervisor2 = models.CharField(max_length=128)
supervisor3 = models.CharField(max_length=128)
research_abstract = models.TextField()
funder = models.CharField(max_length=100, null=True, blank=True)
grant_type = models.ForeignKey(Granttype, models.DO_NOTHING, null=True)
student_type = models.CharField(max_length=20, choices=STUDENT_TYPE, default='other')
graduated = models.BooleanField(default=False)
def __str__(self):
return self.user.get_full_name()
class Studentfundingsource(models.Model):
student = models.ForeignKey(Student, models.DO_NOTHING)
funder = models.TextField()
items = models.CharField(max_length=60)
amount = models.IntegerField(blank=True, null=True)
def __str__(self):
return '{} {}'.format(str(self.student), self.funder)
class MyUserManager(BaseUserManager):
def create_user(self, business_email, password=None):
"""
Creates and saves a User with the given email, date of
birth and password.
"""
if not business_email:
raise ValueError('Users must have an email address')
user = self.model(
business_email=self.normalize_email(business_email)
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, business_email, password):
"""
Creates and saves a superuser with the given email, date of
birth and password.
"""
user = self.create_user(
business_email=business_email,
password=password,
# date_of_birth=date_of_birth,
)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser):
    TITLE_CHOICES = (
        ('mr', 'Mr'),
        ('ms', 'Mrs'),
        ('miss', 'Miss'),
        ('dr', 'Dr'),
        ('prof', 'Prof'),
    )
'''
This model represents the application user, the business email is used as the
user name during login'''
password = models.CharField(max_length=128, blank=True)
last_login = models.DateTimeField(blank=True, null=True)
is_superuser = models.BooleanField(default=False)
business_email = models.CharField(unique=True, max_length=254)
is_staff = models.BooleanField(default=True)
is_active = models.BooleanField(default=True)
date_joined = models.DateTimeField(default=timezone.now, blank=True)
title = models.CharField(choices=TITLE_CHOICES, max_length=32)
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
gender = models.CharField(max_length=6, choices=GENDER_CHOICES)
contact_type = models.CharField(max_length=32, blank=True)
passport_no = models.CharField(_('Passport Number'),max_length=16, blank=True)
home_address = models.TextField()
business_address = models.TextField(blank=True)
country = models.CharField(max_length=64, choices=COUNTRY_CHOICES)
nationality = models.CharField(max_length=64, choices=COUNTRY_CHOICES)
job_title = models.CharField(max_length=64, blank=True)
institution = models.CharField(max_length=128)
area_of_specialisation = models.CharField(max_length=128)
personal_email = models.CharField(max_length=254, blank=True)
skype_id = models.CharField(max_length=32, blank=True)
yahoo_messenger = models.CharField(max_length=32, blank=True)
msn_id = models.CharField(max_length=32, blank=True)
home_tel = models.CharField(max_length=20, blank=True)
business_tel = PhoneNumberField()
mobile = PhoneNumberField()
fax = models.CharField(max_length=20, blank=True)
notes = models.TextField(blank=True)
picture = models.FileField(blank=True, null=True)
cv = models.FileField(blank=True, null=True)
department = models.CharField(max_length=128, blank=True)
highest_qualification = models.CharField(max_length=128)
email_confirmed = models.BooleanField(default=False)
groups = models.ManyToManyField(Group, blank=True)
user_permissions = models.ManyToManyField(
Permission,
verbose_name=_('user permissions'),
blank=True,
help_text=_('Specific permissions for this user.'),
related_name="user_set",
related_query_name="user",
)
objects = MyUserManager()
USERNAME_FIELD = 'business_email'
EMAIL_FIELD = 'business_email'
REQUIRED_FIELDS = []
class Meta:
ordering = ['first_name', 'last_name']
permissions = (
("can_validate_grant_application", "Can validate grant application"),
("can_validate_fellowship", "can Validate Fellowship"),
("can_view_contacts", "Can view contacts"),
("can_edit_contacts", "can edit contacts"),
("can_delete_contacts", "can delete contacts"),
)
def __str__(self):
return self.get_full_name()
'''
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
'''
def has_perm(user, perm, obj=None):
"""
A backend can raise `PermissionDenied` to short-circuit permission checking.
"""
for backend in auth.get_backends():
if not hasattr(backend, 'has_perm'):
continue
try:
if backend.has_perm(user, perm, obj):
return True
except PermissionDenied:
return False
return False
def has_module_perms(user, app_label):
"""
A backend can raise `PermissionDenied` to short-circuit permission checking.
"""
for backend in auth.get_backends():
if not hasattr(backend, 'has_module_perms'):
continue
try:
if backend.has_module_perms(user, app_label):
return True
except PermissionDenied:
return False
return False
'''
@property
def is_staff(self):
"Is the user a member of staff?"
# Simplest possible answer: All admins are staff
return True
'''
def get_full_name(self):
return str(self.first_name) +' '+str(self.last_name)+ ' ('+str(self.business_email)+')'
@property
def role(self):
roles = [group.name for group in self.groups.all()]
return ', '.join(roles)
def email_user(self, subject, message):
message = Mail(
from_email='[email protected]',
to_emails=self.business_email,
subject=subject,
html_content=message)
try:
sg = SendGridAPIClient(API_KEY) # os.environ.get('SENDGRID_API_KEY'))
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
print(e)
| [
"[email protected]"
] | |
fe95e473675f636834b33370b5a053d90ad799da | c7846ee0828539c2a2019928c1cbf3abd35665bf | 1861.py | 5591737795d0cc329e37af61c882a6104f49553c | [] | no_license | whiteblue0/sw_problems | 10476601c8d6d68d42e2f30af87fcde1e5dbbcc5 | 1cefc6236cccc20477bf4eadb458a0fd06b09126 | refs/heads/master | 2020-06-20T10:44:57.463275 | 2020-05-03T07:27:57 | 2020-05-03T07:27:57 | 197,098,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # right, down, left, up
dx = [1,0,-1,0]
dy = [0,1,0,-1]
def ispass(y,x):
return 0<=y<N and 0<=x<N
def findpath(sy,sx):
global result
que =[]
cnt = 1
# visited[sy][sx] = 1
que.append((sy,sx))
while que:
y,x = que.pop(0)
for i in range(4):
ny,nx = y+dy[i], x+dx[i]
if ispass(ny,nx) and (data[ny][nx] == data[y][x]+1):
cnt += 1
que.append((ny,nx))
if cnt >= result[1]:
result[1] = cnt
result[0] = data[sy][sx]
T = int(input())
for tc in range(1,T+1):
N = int(input())
data = [list(map(int,input().split())) for _ in range(N)]
nums = [0]*(N**2+1)
result = [N**2, 0]
for i in range(N):
for j in range(N):
nums[data[i][j]] = (i,j)
for i in range(N**2,0,-1):
sy,sx = nums[i][0], nums[i][1]
findpath(sy,sx)
print("#{}".format(tc),result[0],result[1])
| [
"[email protected]"
] | |
6b25a67d62f342c36269ccdf5ef1219aa9acdecf | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/85131b28f01c2734886292b5f908bd346db627d78ffb686c086516a89e3ff520/xml/parsers/expat/errors.py | e407b956b7e12afd5a9207878324138d166d668f | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,558 | py | # encoding: utf-8
# module xml.parsers.expat.errors calls itself pyexpat.errors
# from C:\Users\Doly\Anaconda3\lib\site-packages\numba\jitclass\_box.cp37-win_amd64.pyd
# by generator 1.147
""" Constants used to describe error conditions. """
# no imports
# Variables with simple values
XML_ERROR_ABORTED = 'parsing aborted'
XML_ERROR_ASYNC_ENTITY = 'asynchronous entity'
XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF = 'reference to external entity in attribute'
XML_ERROR_BAD_CHAR_REF = 'reference to invalid character number'
XML_ERROR_BINARY_ENTITY_REF = 'reference to binary entity'
XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING = 'cannot change setting once parsing has begun'
XML_ERROR_DUPLICATE_ATTRIBUTE = 'duplicate attribute'
XML_ERROR_ENTITY_DECLARED_IN_PE = 'entity declared in parameter entity'
XML_ERROR_EXTERNAL_ENTITY_HANDLING = 'error in processing external entity reference'
XML_ERROR_FEATURE_REQUIRES_XML_DTD = 'requested feature requires XML_DTD support in Expat'
XML_ERROR_FINISHED = 'parsing finished'
XML_ERROR_INCOMPLETE_PE = 'incomplete markup in parameter entity'
XML_ERROR_INCORRECT_ENCODING = 'encoding specified in XML declaration is incorrect'
XML_ERROR_INVALID_TOKEN = 'not well-formed (invalid token)'
XML_ERROR_JUNK_AFTER_DOC_ELEMENT = 'junk after document element'
XML_ERROR_MISPLACED_XML_PI = 'XML or text declaration not at start of entity'
XML_ERROR_NOT_STANDALONE = 'document is not standalone'
XML_ERROR_NOT_SUSPENDED = 'parser not suspended'
XML_ERROR_NO_ELEMENTS = 'no element found'
XML_ERROR_NO_MEMORY = 'out of memory'
XML_ERROR_PARAM_ENTITY_REF = 'illegal parameter entity reference'
XML_ERROR_PARTIAL_CHAR = 'partial character'
XML_ERROR_PUBLICID = 'illegal character(s) in public id'
XML_ERROR_RECURSIVE_ENTITY_REF = 'recursive entity reference'
XML_ERROR_SUSPENDED = 'parser suspended'
XML_ERROR_SUSPEND_PE = 'cannot suspend in external parameter entity'
XML_ERROR_SYNTAX = 'syntax error'
XML_ERROR_TAG_MISMATCH = 'mismatched tag'
XML_ERROR_TEXT_DECL = 'text declaration not well-formed'
XML_ERROR_UNBOUND_PREFIX = 'unbound prefix'
XML_ERROR_UNCLOSED_CDATA_SECTION = 'unclosed CDATA section'
XML_ERROR_UNCLOSED_TOKEN = 'unclosed token'
XML_ERROR_UNDECLARING_PREFIX = 'must not undeclare prefix'
XML_ERROR_UNDEFINED_ENTITY = 'undefined entity'
XML_ERROR_UNEXPECTED_STATE = 'unexpected parser state - please send a bug report'
XML_ERROR_UNKNOWN_ENCODING = 'unknown encoding'
XML_ERROR_XML_DECL = 'XML declaration not well-formed'
__loader__ = None
__spec__ = None
# no functions
# no classes
# variables with complex values
codes = {
'XML declaration not well-formed': 30,
'XML or text declaration not at start of entity': 17,
'asynchronous entity': 13,
'cannot change setting once parsing has begun': 26,
'cannot suspend in external parameter entity': 37,
'document is not standalone': 22,
'duplicate attribute': 8,
'encoding specified in XML declaration is incorrect': 19,
'entity declared in parameter entity': 24,
'error in processing external entity reference': 21,
'illegal character(s) in public id': 32,
'illegal parameter entity reference': 10,
'incomplete markup in parameter entity': 29,
'junk after document element': 9,
'mismatched tag': 7,
'must not undeclare prefix': 28,
'no element found': 3,
'not well-formed (invalid token)': 4,
'out of memory': 1,
'parser not suspended': 34,
'parser suspended': 33,
'parsing aborted': 35,
'parsing finished': 36,
'partial character': 6,
'recursive entity reference': 12,
'reference to binary entity': 15,
'reference to external entity in attribute': 16,
'reference to invalid character number': 14,
'requested feature requires XML_DTD support in Expat': 25,
'syntax error': 2,
'text declaration not well-formed': 31,
'unbound prefix': 27,
'unclosed CDATA section': 20,
'unclosed token': 5,
'undefined entity': 11,
'unexpected parser state - please send a bug report': 23,
'unknown encoding': 18,
}
messages = {
1: 'out of memory',
2: 'syntax error',
3: 'no element found',
4: 'not well-formed (invalid token)',
5: 'unclosed token',
6: 'partial character',
7: 'mismatched tag',
8: 'duplicate attribute',
9: 'junk after document element',
10: 'illegal parameter entity reference',
11: 'undefined entity',
12: 'recursive entity reference',
13: 'asynchronous entity',
14: 'reference to invalid character number',
15: 'reference to binary entity',
16: 'reference to external entity in attribute',
17: 'XML or text declaration not at start of entity',
18: 'unknown encoding',
19: 'encoding specified in XML declaration is incorrect',
20: 'unclosed CDATA section',
21: 'error in processing external entity reference',
22: 'document is not standalone',
23: 'unexpected parser state - please send a bug report',
24: 'entity declared in parameter entity',
25: 'requested feature requires XML_DTD support in Expat',
26: 'cannot change setting once parsing has begun',
27: 'unbound prefix',
28: 'must not undeclare prefix',
29: 'incomplete markup in parameter entity',
30: 'XML declaration not well-formed',
31: 'text declaration not well-formed',
32: 'illegal character(s) in public id',
33: 'parser suspended',
34: 'parser not suspended',
35: 'parsing aborted',
36: 'parsing finished',
37: 'cannot suspend in external parameter entity',
}
| [
"[email protected]"
] | |
60f18d36c09eaf99c386081dad6ff20c40293d18 | 66c94b5e427c0b8f8f7101de9c17af1423f00682 | /keras2/keras66_3_hyper_lstm.py | b669f1052d34a3af94193a216b6ad0a57a9fccf6 | [] | no_license | NamkyuHan/bit_seoul | a34ea3c49666ee2183026e960e45092778643d55 | 3112eb576089cdf906c4f326337b4d2b5e5e4c29 | refs/heads/master | 2023-01-30T19:02:53.323592 | 2020-12-17T01:05:17 | 2020-12-17T01:05:17 | 311,277,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,501 | py | import numpy as np
from tensorflow.keras.models import load_model, Sequential, Model
from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D, Flatten, Input, LSTM
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dropout
#### 1. Data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# x_predict=x_test[:10, :, :, :]
x_train = x_train.reshape(60000, 196, 4).astype('float32')/255.
x_test = x_test.reshape(10000, 196, 4).astype('float32')/255.
# x_predict=x_predict.astype('float32')/255.
# One-hot encode the labels
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
#2. 모델 구성
def build_model(drop=0.5, optimizer='adam'):
inputs = Input(shape=(196,4), name='input')
x = LSTM(30, activation='relu', name='hidden1')(inputs)
x = Dropout(drop)(x)
x = Dense(20, activation='relu', name='hidden2')(x)
x = Dropout(drop)(x)
x = Dense(10, activation='relu', name='hidden3')(x)
x = Dropout(drop)(x)
outputs = Dense(10, activation='softmax', name='output')(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=optimizer, metrics=['acc'], loss='categorical_crossentropy')
return model
# This function is the important part: it defines the hyperparameter search
# space (node counts and parameter values), so the layers in the model above
# must be set up so these settings can actually take effect.
def create_hyperparameters():
batchs = [10] #[10, 20, 30, 40, 50] # [10]
optimizers = ('rmsprop', 'adam', 'adadelta') #['rmsprop']
# dropout = np.linspace(0.1, 0.5, 5)
dropout = [0.1, 0.5, 5]
return{'batch_size' : batchs, "optimizer" : optimizers, "drop" : dropout}
hyperparameters = create_hyperparameters()
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier  # wrap the Keras model for scikit-learn
model = KerasClassifier(build_fn=build_model, verbose=1)
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
search = RandomizedSearchCV(model, hyperparameters, cv=3)
search.fit(x_train, y_train)
print(search.best_params_)
acc = search.score(x_test, y_test)
print("최종 스코어 : ", acc)
# Let's swap grid search for random search
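# Exhaustive alternative (illustrative; GridSearchCV is imported above):
#   search = GridSearchCV(model, hyperparameters, cv=3)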
'''
{'batch_size': 10, 'drop': 0.5, 'optimizer': 'rmsprop'}
1000/1000 [==============================] - 15s 15ms/step - loss: 301713288744930553888768.0000 - acc: 0.2320
Final score :  0.23199999332427979
''' | [
"[email protected]"
] | |
d1f2e7077c1c1c2471d513266b67c1f5d7b0a264 | c1db0672199013000a2ce6328f6fe86776cbbacd | /hrmin.py | 8de3dcaa9f909a47a990713759ea2f54aa49d8dd | [] | no_license | swarnanjali/python-programes | 0db3aa10969ca2c3d925689e29ffa7f48dbc137e | aa08d67da9b2ac377335156796031cf40208bd3f | refs/heads/master | 2020-05-23T02:01:45.129247 | 2019-07-19T07:01:57 | 2019-07-19T07:01:57 | 186,594,991 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | a=int(input())
# a is a total number of minutes; print it as "hours minutes"
b = a // 60
c = a % 60
print(b, end=" ")
print(c)
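# e.g. an input of 125 prints "2 5"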
| [
"[email protected]"
] | |
40d92d8d26105c813762bbaaf0ca6165692e5266 | 8993469765f9e504c388eeba2c940f40ec971cb7 | /labs1/24.py | da9f4f061d5748aadf5564408ee46328ed80785b | [] | no_license | Sens3ii/ICT2020 | 293f8df77aeaba66b33c08540e037365b18246a7 | c0f68680b13534bd5697a9f95fae9dc7e2ed4089 | refs/heads/master | 2023-02-25T17:51:55.459518 | 2021-02-03T09:27:53 | 2021-02-03T09:27:53 | 296,038,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | Days = int(input())
Hours = int(input())
Minutes = int(input())
Seconds = int(input())
print(Seconds + Minutes*60 + Hours*60*60 + Days*60*60*24, "sec")
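# Worked check: 1 day, 1 hour, 1 minute, 1 second
# -> 86400 + 3600 + 60 + 1 = 90061 sec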
| [
"[email protected]"
] | |
02bfe76934653f7d299dcb398b654b19e587b33e | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/fdm_materials/scripts/check_material_profiles.py | bad82e1ed75536a491b3ea2ab4a6e5b63c09cfcc | [
"CC0-1.0",
"GPL-3.0-only"
] | permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 3,103 | py | # This script is dedicated to the public domain under the terms of the CC0 license.
from collections import OrderedDict
import os
import sys
import re
class MaterialProfilesValidator:
def __init__(self, root_dir: str):
self._repo_dir = os.path.abspath(root_dir)
self._materials_dir = self._repo_dir
self._guid_pattern = re.compile(r"<GUID>.*</GUID>")
def _get_guid(self, content: str) -> str:
guid = None
for line in content.splitlines():
line = line.strip()
if self._guid_pattern.match(line):
guid = line.strip("<GUID>").strip("</GUID>")
break
return guid
def get_materials_dir(self, dirpath: str):
for root_dir, dirnames, filenames in os.walk(dirpath):
has_materials_file = any(fn.endswith(".xml.fdm_material") for fn in filenames)
if not has_materials_file:
for dirname in dirnames:
full_dir_path = os.path.join(root_dir, dirname)
return self.get_materials_dir(full_dir_path)
return dirpath
## Validates the preset settings files and returns ``True`` or ``False``
# indicating whether there are invalid files.
def validate(self) -> bool:
# parse the definition file
guid_dict = OrderedDict()
materials_dir = self.get_materials_dir(self._materials_dir)
# go through all the preset settings files
for _, _, filenames in os.walk(materials_dir):
for filename in filenames:
file_path = os.path.join(materials_dir, filename)
if not filename.endswith(".xml.fdm_material"):
continue
with open(file_path, "r", encoding = "utf-8") as f:
content = f.read()
guid = self._get_guid(content)
if guid not in guid_dict:
guid_dict[guid] = []
item_list = guid_dict[guid]
item_list.append({"file_name": filename,
"file_path": file_path})
break
has_invalid_files = False
for guid, file_item_list in guid_dict.items():
if len(file_item_list) <= 1:
continue
has_invalid_files = True
if guid is not None:
print("-> The following files contain the same GUID [%s]:" % guid)
else:
print("-> The following files DO NOT contain any GUID:")
for file_item in file_item_list:
print(" -- [%s]" % file_item["file_name"])
print("-> PLEASE make sure to generate unique GUIDs for each material.")
return not has_invalid_files
if __name__ == "__main__":
script_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.abspath(os.path.join(script_dir, ".."))
validator = MaterialProfilesValidator(root_dir)
is_everything_validate = validator.validate()
ret_code = 0 if is_everything_validate else 1
sys.exit(ret_code)
| [
"[email protected]"
] | |
b06374f5b30ecf289a54899c6656ebc17d0ebfad | 18a281c772550d174fc903f35f70e27ee09bb89a | /web/config/wsgi.py | 8d0b154dc4018687b0e7031b5f7a4890542fefe5 | [] | no_license | quinceleaf/implementing-webhooks | 60ef33091a7254a509965cc4cc4de635709f8ec4 | 1cebfc3cdabd85e503fbdb60f418321a906b83ad | refs/heads/main | 2023-07-11T01:20:23.156674 | 2021-08-06T03:54:13 | 2021-08-06T03:54:13 | 393,219,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | """
WSGI config for web project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
cfb0ba5f2f127c5af9c61326c3d79fe9574f9dd7 | 1760e87ada878d3d016b68eac4194701fada19d4 | /piGAN_lib/fid_evaluation.py | 82804971b482f250b54b48f9d505cd5926bca4ca | [
"MIT"
] | permissive | tonywork/CIPS-3D | 5fd04b56fafeb46e9f3396314dec1d6f302da740 | 9244193048c73f55270d2df28fb160f42d5953ad | refs/heads/main | 2023-08-26T04:52:40.484046 | 2021-11-01T06:17:12 | 2021-11-01T06:17:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,692 | py | """
Contains code for logging approximate FID scores during training.
If you want to output ground-truth images from the training dataset, you can
run this file as a script.
"""
import os
import shutil
import torch
import copy
import argparse
from torchvision.utils import save_image
from pytorch_fid import fid_score
from tqdm import tqdm
import curriculums
# import datasets
from exp.pigan import datasets
def output_real_images(dataloader, num_imgs, real_dir):
img_counter = 0
batch_size = dataloader.batch_size
dataloader = iter(dataloader)
for i in range(num_imgs//batch_size):
real_imgs, _ = next(dataloader)
for img in real_imgs:
save_image(img, os.path.join(real_dir, f'{img_counter:0>5}.jpg'), normalize=True, range=(-1, 1))
img_counter += 1
def setup_evaluation(dataset_name, generated_dir, target_size=128, num_imgs=8000, outdir=None, **kwargs):
# Only make real images if they haven't been made yet
if outdir:
real_dir = os.path.join(outdir, 'evaluation', dataset_name + '_real_images_' + str(target_size))
else:
real_dir = os.path.join('EvalImages', dataset_name + '_real_images_' + str(target_size))
if os.path.exists(real_dir) and len(os.listdir(real_dir)) == 0:
os.rmdir(real_dir)
if not os.path.exists(real_dir):
os.makedirs(real_dir)
dataloader, CHANNELS = datasets.get_dataset(dataset_name, img_size=target_size, shuffle=False, **kwargs)
print('outputting real images...')
output_real_images(dataloader, num_imgs, real_dir)
print('...done')
if generated_dir is not None:
os.makedirs(generated_dir, exist_ok=True)
return real_dir
def output_images(generator, input_metadata, rank, world_size, output_dir, num_imgs=2048):
metadata = copy.deepcopy(input_metadata)
metadata['img_size'] = 128
metadata['batch_size'] = 4
metadata['h_stddev'] = metadata.get('h_stddev_eval', metadata['h_stddev'])
metadata['v_stddev'] = metadata.get('v_stddev_eval', metadata['v_stddev'])
metadata['sample_dist'] = metadata.get('sample_dist_eval', metadata['sample_dist'])
metadata['psi'] = 1
    generator.eval()
    img_counter = rank
if rank == 0: pbar = tqdm("generating images", total = num_imgs)
with torch.no_grad():
while img_counter < num_imgs:
z = torch.randn((metadata['batch_size'], generator.module.z_dim), device=generator.module.device)
generated_imgs, _ = generator.module.staged_forward(z, **metadata)
for img in generated_imgs:
save_image(img, os.path.join(output_dir, f'{img_counter:0>5}.jpg'), normalize=True, range=(-1, 1))
img_counter += world_size
if rank == 0: pbar.update(world_size)
if rank == 0: pbar.close()
def calculate_fid(dataset_name, generated_dir, target_size=256, outdir=None):
if outdir:
real_dir = os.path.join(outdir, 'evaluation', dataset_name + '_real_images_' + str(target_size))
else:
real_dir = os.path.join('EvalImages', dataset_name + '_real_images_' + str(target_size))
fid = fid_score.calculate_fid_given_paths([real_dir, generated_dir], 128, 'cuda', 2048)
torch.cuda.empty_cache()
return fid
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='CelebA')
parser.add_argument('--img_size', type=int, default=128)
parser.add_argument('--num_imgs', type=int, default=8000)
opt = parser.parse_args()
real_images_dir = setup_evaluation(opt.dataset, None, target_size=opt.img_size, num_imgs=opt.num_imgs) | [
"[email protected]"
] | |
c016c10d81333bd078fbdb4fbe5d3567e161c4d5 | ea285978bd60c8de8783a729effa7c92eeeb98e8 | /DBcm.py | 31cf80fdf2abed2c7b846332bf60a70b6e17c242 | [] | no_license | detalikota/website1 | 2426667ff8e838e0c4609a6694795c22d088a59d | 7eb75c52697ed7fbe77a2b5178117049bdd1bdd6 | refs/heads/master | 2023-01-11T16:03:52.456786 | 2020-11-02T19:20:04 | 2020-11-02T19:20:04 | 304,236,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | import mysql.connector
class UseDatabase:
def __init__(self, config: dict) -> None:
self.configuration = config
def __enter__(self) -> 'Cursor':
self.conn = mysql.connector.connect(**self.configuration)
self.cursor = self.conn.cursor()
return self.cursor
def __exit__(self, exc_type, exc_value, exc_trace) -> None:
self.conn.commit()
self.cursor.close()
self.conn.close()
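# Usage sketch (illustrative; the connection details are placeholders):
#
#   config = {'host': '127.0.0.1', 'user': 'me', 'password': 'pw', 'database': 'demo'}
#   with UseDatabase(config) as cursor:
#       cursor.execute('SELECT 1')
#       print(cursor.fetchall())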
| [
"[email protected]"
] | |
ebe85d5a801812a0a216ab1b07e9a8219bd73772 | ee6183f60b9efd1ecb6c2ba3c364f2fe4c7b2292 | /src/kfg/version.py | 67d01bdcc8ff949ac8e85f4682d998bc553a964a | [
"MIT"
] | permissive | abingham/kfg | b17ec10d3fe725707e61ae53ba1e87f27e0bd928 | 3e2f65501c6c4af44c3ebf24066c8bf2e7724c44 | refs/heads/master | 2020-03-21T08:28:41.342466 | 2018-12-22T13:49:38 | 2018-12-22T13:58:52 | 21,937,932 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | __version__ = '2.0.0'
__version_info__ = __version__.split('.')
| [
"[email protected]"
] | |
f660dc9314f35d14fc9c3c1466520c7b209c5e3b | 641fa8341d8c436ad24945bcbf8e7d7d1dd7dbb2 | /components/font_service/DEPS | a32f59c6a0f4be10e379efea183a5c36e3ed2e7d | [
"BSD-3-Clause"
] | permissive | massnetwork/mass-browser | 7de0dfc541cbac00ffa7308541394bac1e945b76 | 67526da9358734698c067b7775be491423884339 | refs/heads/master | 2022-12-07T09:01:31.027715 | 2017-01-19T14:29:18 | 2017-01-19T14:29:18 | 73,799,690 | 4 | 4 | BSD-3-Clause | 2022-11-26T11:53:23 | 2016-11-15T09:49:29 | null | UTF-8 | Python | false | false | 163 | include_rules = [
"+services/service_manager",
"+mojo/common",
"+mojo/public",
"+services/tracing/public/cpp",
"+skia",
"+third_party/skia/include",
]
| [
"[email protected]"
] | ||
4800326295c56e6096e38e7a9c096a795b4d9e22 | 02035d84092291ff6a691047e7a3709ea03dddd8 | /visbrain/objects/connect_obj.py | d4b1f907c3956bee368962c497327a866f2b9073 | [
"BSD-3-Clause"
] | permissive | abhiishekpal/visbrain | a8abaf9bc4434fcf694158ac6510d9f67925b9a7 | 824724656a5d890330c086541176a539b004766d | refs/heads/master | 2020-03-30T08:17:28.823302 | 2018-07-04T01:54:59 | 2018-07-04T01:56:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,998 | py | """Base class for objects of type connectivity."""
import numpy as np
from collections import Counter
from vispy import scene
from vispy.scene import visuals
from .visbrain_obj import VisbrainObject, CombineObjects
from ..utils import array2colormap, normalize, color2vb, wrap_properties
class ConnectObj(VisbrainObject):
"""Create a connectivity object.
Parameters
----------
name : string
The name of the connectivity object.
nodes : array_like
Array of nodes coordinates of shape (n_nodes, 3).
edges : array_like | None
Array of ponderations for edges of shape (n_nodes, n_nodes).
select : array_like | None
Array to select edges to display. This should be an array of boolean
values of shape (n_nodes, n_nodes).
line_width : float | 3.
Connectivity line width.
color_by : {'strength', 'count'}
Coloring method. Use 'strength' to color edges according to their
connection strength define by the edges input. Use 'count' to color
edges according to the number of connections per node.
custom_colors : dict | None
Use a dictionary to colorize edges. For example, {1.2: 'red',
2.8: 'green', None: 'black'} turn connections that have a 1.2 and 2.8
strength into red and green. All others connections are set to black.
alpha : float | 1.
Transparency level (if dynamic is None).
antialias : bool | False
Use smoothed lines.
dynamic : tuple | None
Control the dynamic opacity. For example, if dynamic=(0, 1),
strong connections will be more opaque than weaker connections.
cmap : string | 'viridis'
Colormap to use if custom_colors is None.
vmin : float | None
Lower threshold of the colormap if custom_colors is None.
under : string | None
Color to use for values under vmin if custom_colors is None.
vmin : float | None
Higher threshold of the colormap if custom_colors is None.
over : string | None
Color to use for values over vmax if custom_colors is None.
transform : VisPy.visuals.transforms | None
VisPy transformation to set to the parent node.
parent : VisPy.parent | None
Line object parent.
verbose : string
Verbosity level.
_z : float | 10.
In case of (n_sources, 2) use _z to specify the elevation.
kw : dict | {}
Optional arguments are used to control the colorbar
(See :class:`ColorbarObj`).
Notes
-----
List of supported shortcuts :
* **s** : save the figure
* **<delete>** : reset camera
Examples
--------
>>> import numpy as np
>>> from visbrain.objects import ConnectObj
>>> n_nodes = 100
>>> nodes = np.random.rand(n_nodes, 3)
>>> edges = np.random.uniform(low=-10., high=10., size=(n_nodes, n_nodes))
>>> select = np.logical_and(edges >= 0, edges <= 1.)
>>> c = ConnectObj('Connect', nodes, edges, select=select, cmap='inferno',
>>> antialias=True)
>>> c.preview(axis=True)
"""
###########################################################################
###########################################################################
# BUILT IN
###########################################################################
###########################################################################
def __init__(self, name, nodes, edges, select=None, line_width=3.,
color_by='strength', custom_colors=None, alpha=1.,
antialias=False, dynamic=None, cmap='viridis', clim=None,
vmin=None, vmax=None, under='gray', over='red',
transform=None, parent=None, verbose=None, _z=-10., **kw):
"""Init."""
VisbrainObject.__init__(self, name, parent, transform, verbose, **kw)
self._update_cbar_args(cmap, clim, vmin, vmax, under, over)
# _______________________ CHECKING _______________________
# Nodes :
assert isinstance(nodes, np.ndarray) and nodes.ndim == 2
sh = nodes.shape
self._n_nodes = sh[0]
assert sh[1] >= 2
pos = nodes if sh[1] == 3 else np.c_[nodes, np.full((len(self),), _z)]
self._pos = pos.astype(np.float32)
# Edges :
assert edges.shape == (len(self), len(self))
if not np.ma.isMA(edges):
mask = np.zeros(edges.shape, dtype=bool)
edges = np.ma.masked_array(edges, mask=mask)
# Select :
if isinstance(select, np.ndarray):
assert select.shape == edges.shape and select.dtype == bool
edges.mask = np.invert(select)
edges.mask[np.tril_indices(len(self), 0)] = True
self._edges = edges
# Colorby :
assert color_by in ['strength', 'count']
self._color_by = color_by
# Dynamic :
if dynamic is not None:
assert len(dynamic) == 2
self._dynamic = dynamic
# Custom color :
if custom_colors is not None:
assert isinstance(custom_colors, dict)
self._custom_colors = custom_colors
# Alpha :
assert 0. <= alpha <= 1.
self._alpha = alpha
# _______________________ LINE _______________________
self._connect = visuals.Line(name='ConnectObjLine', width=line_width,
antialias=antialias, parent=self._node,
connect='segments')
self._connect.set_gl_state('translucent')
self._build_line()
def __len__(self):
"""Get the number of nodes."""
return self._n_nodes
def update(self):
"""Update the line."""
self._connect.update()
def _build_line(self):
"""Build the connectivity line."""
# Build the line position (consecutive segments):
nnz_x, nnz_y = np.where(~self._edges.mask)
indices = np.c_[nnz_x, nnz_y].flatten()
line_pos = self._pos[indices, :]
# Color either edges or nodes :
if self._color_by == 'strength':
nnz_values = self._edges.compressed()
values = np.c_[nnz_values, nnz_values].flatten()
elif self._color_by == 'count':
node_count = Counter(np.ravel([nnz_x, nnz_y]))
values = np.array([node_count[k] for k in indices])
self._minmax = (values.min(), values.max())
if self._clim is None:
self._clim = self._minmax
# Get the color according to values :
if isinstance(self._custom_colors, dict): # custom color
if None in list(self._custom_colors.keys()): # {None : 'color'}
color = color2vb(self._custom_colors[None], length=len(values))
else: # black by default
color = np.zeros((len(values), 4), dtype=np.float32)
for val, col in self._custom_colors.items():
color[values == val, :] = color2vb(col)
else:
color = array2colormap(values, **self.to_kwargs())
color[:, -1] = self._alpha
# Dynamic color :
if self._dynamic is not None:
color[:, 3] = normalize(values.copy(), tomin=self._dynamic[0],
tomax=self._dynamic[1])
# Send data to the connectivity object :
self._connect.set_data(pos=line_pos, color=color)
def _get_camera(self):
"""Get the most adapted camera."""
d_mean = self._pos.mean(0)
dist = np.sqrt(np.sum(d_mean ** 2))
return scene.cameras.TurntableCamera(center=d_mean, scale_factor=dist)
###########################################################################
###########################################################################
# PROPERTIES
###########################################################################
###########################################################################
# ----------- LINE_WIDTH -----------
@property
def line_width(self):
"""Get the line_width value."""
return self._connect.width
@line_width.setter
@wrap_properties
def line_width(self, value):
"""Set line_width value."""
assert isinstance(value, (int, float))
self._connect._width = value
self.update()
# ----------- COLOR_BY -----------
@property
def color_by(self):
"""Get the color_by value."""
return self._color_by
@color_by.setter
@wrap_properties
def color_by(self, value):
"""Set color_by value."""
assert value in ['strength', 'count']
self._color_by = value
self._build_line()
# ----------- DYNAMIC -----------
@property
def dynamic(self):
"""Get the dynamic value."""
return self._dynamic
@dynamic.setter
@wrap_properties
def dynamic(self, value):
"""Set dynamic value."""
assert value is None or len(value) == 2
self._dynamic = value
self._build_line()
# ----------- ALPHA -----------
@property
def alpha(self):
"""Get the alpha value."""
return self._alpha
@alpha.setter
@wrap_properties
def alpha(self, value):
"""Set alpha value."""
assert 0. <= value <= 1.
self._connect.color[:, -1] = value
self._alpha = value
self.update()
class CombineConnect(CombineObjects):
"""Combine connectivity objects.
Parameters
----------
cobjs : ConnectObj/list | None
List of source objects.
select : string | None
The name of the connectivity object to select.
parent : VisPy.parent | None
Markers object parent.
"""
def __init__(self, cobjs=None, select=None, parent=None):
"""Init."""
CombineObjects.__init__(self, ConnectObj, cobjs, select, parent)
| [
"[email protected]"
] | |
d6b6bc26a9dbc253af21a1a6c574c56a67df447f | 50ca6df816baeeb59e2cfb0320d46d621df165d3 | /Python/201910/191023/cksession.py | c7b17dbe8534d4bc95694425e3887f9cc17743d5 | [] | no_license | 96no3/PythonStudy | 6606342e788c63ca35e2a6cf21a432fc5274a343 | 2bf21081dd4803c7f4702b6cfccbaca3d2aa6f7b | refs/heads/master | 2020-08-07T04:19:35.341606 | 2019-12-18T05:35:10 | 2019-12-18T05:35:10 | 213,292,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,941 | py | #!/usr/bin/env python3
# Session handling using cookies
from http import cookies
import os,json
import datetime,random,hashlib
import cgitb
class CookieSession:
"""クッキーを使ったセッションのクラス"""
SESSION_ID = "CookieSessionId"
    # Where session data is stored; os.path.dirname() gets this file's directory
SESSION_DIR = os.path.dirname(os.path.abspath(__file__)) + "/SESSION"
def __init__(self):
        # Make sure the session data directory exists
if not os.path.exists(self.SESSION_DIR):
os.mkdir(self.SESSION_DIR)
        # Get the session ID from the cookie
rc = os.environ.get("HTTP_COOKIE","")
self.cookie = cookies.SimpleCookie(rc)
if self.SESSION_ID in self.cookie:
self.sid = self.cookie[self.SESSION_ID].value
else:
            # First visit: generate a new session ID
self.sid = self.gen_sid()
        # Load any previously saved data
self.modified = False
self.values = {}
path = self.SESSION_DIR + "/" + self.sid
if os.path.exists(path):
with open(path,"r",encoding="utf-8") as f:
a_json = f.read()
            # Restore the data from JSON
self.values = json.loads(a_json)
def gen_sid(self):
"""セッションIDを生成する"""
token = ":#sa$2jAiN"
now = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
rnd = random.randint(0,100000)
key = (token + now + str(rnd)).encode("utf-8")
sid = hashlib.sha256(key).hexdigest()
return sid
def output(self):
"""クッキーヘッダを書き出す"""
self.cookie[self.SESSION_ID] = self.sid
self.save_data()
return self.cookie.output()
def save_data(self):
"""セッションデータをファイルに書き出す"""
if not self.modified:
return
path = self.SESSION_DIR + "/" + self.sid
        # Serialize to JSON and save
a_json = json.dumps(self.values)
with open(path,"w",encoding="utf-8") as f:
f.write(a_json)
    # Special methods for subscript (dict-style) access
def __getitem__(self,key):
return self.values[key]
def __setitem__(self,key,value):
self.modified = True
self.values[key] = value
def __contains__(self,key):
return key in self.values
    def clear(self):
        self.modified = True  # mark dirty so the cleared state is persisted
        self.values = {}
if __name__ == "__main__":
cgitb.enable()
    # Quick test (visit-counter example)
ck = CookieSession()
counter = 1
if "counter" in ck:
counter = int(ck["counter"]) + 1
ck["counter"] = counter
print("Content-Type: text/html; charset=utf-8")
print(ck.output())
print("")
print("counter=",counter)
| [
"[email protected]"
] | |
ba17a8097673f1389fb4890fb8b41fcd93bd6d19 | 209aae9f40657d48461bed5e081c4f235f86090a | /2020/day11-2.py | b0cf3c89cba7f1a4458990d2cb39256d430e06a9 | [] | no_license | scheidguy/Advent_of_Code | 6e791132157179928e1415f49467ad221ef1e258 | fbc09e4d26502b9a77e0c8d2840b11ec85a3c478 | refs/heads/main | 2023-03-05T12:34:15.343642 | 2021-02-20T00:27:58 | 2021-02-20T00:27:58 | 329,106,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | import copy
def neighbors(R , C, grid):
num = 0
r = R; c = C
while r > 0: # north
r -= 1
if grid[r][c] == '#': num += 1; break
if grid[r][c] == 'L': break
r = R; c = C
while r < len(grid)-1: # south
r += 1
if grid[r][c] == '#': num += 1; break
if grid[r][c] == 'L': break
r = R; c = C
while c > 0: # west
c -= 1
if grid[r][c] == '#': num += 1; break
if grid[r][c] == 'L': break
r = R; c = C
while c < len(grid[0])-1: # east
c += 1
if grid[r][c] == '#': num += 1; break
if grid[r][c] == 'L': break
r = R; c = C
while r > 0 and c > 0: # northwest
r -= 1; c -= 1
if grid[r][c] == '#': num += 1; break
if grid[r][c] == 'L': break
r = R; c = C
while r > 0 and c < len(grid[0])-1: # northeast
r -= 1; c += 1
if grid[r][c] == '#': num += 1; break
if grid[r][c] == 'L': break
r = R; c = C
while r < len(grid)-1 and c > 0: # southwest
r += 1; c -= 1
if grid[r][c] == '#': num += 1; break
if grid[r][c] == 'L': break
r = R; c = C
while r < len(grid)-1 and c < len(grid[0])-1: # southeast
r += 1; c += 1
if grid[r][c] == '#': num += 1; break
if grid[r][c] == 'L': break
return num
f = open('day11-1_input.txt')
prevgrid = f.readlines()
f.close()
# encircle the grid with floor spaces to simplify processing
rows = len(prevgrid)
for i in range(rows):
prevgrid[i] = '.' + prevgrid[i].strip() + '.'
cols = len(prevgrid[0])
prevgrid.append(cols * '.')
prevgrid.insert(0, cols * '.')
nowgrid = copy.deepcopy(prevgrid)
rows = len(prevgrid)
unstable = True
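# step the simulation until the seating map stops changing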
while unstable:
for row in range(rows):
for col in range(cols):
seat = prevgrid[row][col]
if seat == 'L':
neigh = neighbors(row, col, prevgrid)
if neigh == 0:
updated = list(nowgrid[row])
updated[col] = '#'
updated = "".join(updated)
nowgrid[row] = updated
elif seat == '#':
neigh = neighbors(row, col, prevgrid)
if neigh >= 5:
updated = list(nowgrid[row])
updated[col] = 'L'
updated = "".join(updated)
nowgrid[row] = updated
if prevgrid == nowgrid:
unstable = False
print(sum([row.count('#') for row in nowgrid]))
else: prevgrid = copy.deepcopy(nowgrid)
| [
"[email protected]"
] | |
99143087e6840facd871adef59dd5d9989058001 | 61673ab9a42f7151de7337608c442fa6247f13bb | /peewee/execute_sql-to-dictionary/main-2-list-comprehension.py | 11bdabfb5d57af845048800c1572af940ceac702 | [
"MIT"
] | permissive | furas/python-examples | 22d101670ecd667a29376d7c7d7d86f8ec71f6cf | 95cb53b664f312e0830f010c0c96be94d4a4db90 | refs/heads/master | 2022-08-23T23:55:08.313936 | 2022-08-01T14:48:33 | 2022-08-01T14:48:33 | 45,575,296 | 176 | 91 | MIT | 2021-02-17T23:33:37 | 2015-11-04T23:54:32 | Python | UTF-8 | Python | false | false | 491 | py |
# date: 2019.05.20
# author: bartłomiej 'furas' Burek
# https://stackoverflow.com/questions/56219520/peewee-how-do-i-execute-raw-query-and-map-it-to-the-dictionary/56219996#56219996
import peewee
db = peewee.MySQLDatabase('my_database', user='my_user', password='my_password')
cursor = db.execute_sql('show table status from my_database')
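# cursor.description (standard DB-API metadata) names the result columns;
# zipping those names with each fetched row turns the raw tuples into dicts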
column_names = [x[0] for x in cursor.description]
all_tables = [dict(zip(column_names, row)) for row in cursor.fetchall()]
print(all_tables)
| [
"[email protected]"
] | |
7bca9e6f78163b6d2e52f659b87b9562245ae0f0 | cf7d96bdd34205ede987f0985dfc9e3ab415ee06 | /ad_bank_loan/voucher.py | 5e064d94136a648862eb9cf1db431f9e95edd32a | [] | no_license | hendrasaputra0501/btxjalan | afc93467d54a6f20ef6ac46f7359e964ad5d42a0 | d02bc085ad03efc982460d77f7af1eb5641db729 | refs/heads/master | 2020-12-30T11:02:05.416120 | 2017-07-31T01:34:08 | 2017-07-31T01:34:08 | 98,836,234 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | from osv import osv, fields
from tools.translate import _
import openerp.addons.decimal_precision as dp
import netsvc
from datetime import datetime
import time
from dateutil.relativedelta import relativedelta
class account_voucher_writeoff(osv.Model):
_inherit = "account.voucher.writeoff"
_columns = {
"interest_id" : fields.many2one("account.bank.loan.interest","Interest",ondelete="cascade"),
} | [
"[email protected]"
] | |
91466fad4434ae3cac50d7862221c1f502a79389 | c13b4b0d3ab59d76a91a0811c30110098f8e4e9d | /catalogo/views.py | 1b3105a16718c6cbae293edd2b91c715d5ddbea7 | [] | no_license | ryujiin/store | 8d44b1f70df28df855c8966b3e9b50c99d99c409 | dab4e586daa9162d0a5d2fef0b3856669fd2795c | refs/heads/master | 2021-01-23T01:41:43.503719 | 2017-05-31T00:29:33 | 2017-05-31T00:29:33 | 92,887,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | from django.shortcuts import render
from django.http import HttpResponse, Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
# Create your views here.
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated,IsAdminUser
from models import *
from serializers import *
from datetime import datetime, timedelta, time
how_many_days = 20
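# window in days used by the 'novedades' (new arrivals) filter below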
class CatalogoViewsets(viewsets.ReadOnlyModelViewSet):
serializer_class = ProductoSingleSereializer
ordering_fields = ('precio_sort', 'num_comentarios')
def get_queryset(self):
queryset = Producto.objects.filter(activo=True).order_by('-actualizado')
categoria = self.request.query_params.get('categoria', None)
slug = self.request.query_params.get('slug',None)
limite = self.request.query_params.get('limite',None)
if categoria:
if categoria == 'ofertas':
queryset = queryset.filter(en_oferta=True)
elif categoria == 'novedades':
queryset = queryset.filter(actualizado__gte=datetime.now()-timedelta(days=how_many_days))
else:
queryset = queryset.filter(categorias__slug=categoria)
if slug:
queryset = queryset.filter(slug=slug)
if limite:
queryset = queryset[:limite]
return queryset
class ListaProductosViewsets(viewsets.ReadOnlyModelViewSet):
serializer_class = ProductoListaSerializers
def get_queryset(self):
queryset = Producto.objects.filter(activo=True).order_by('-actualizado')
categoria = self.request.query_params.get('categoria', None)
slug = self.request.query_params.get('slug',None)
limite = self.request.query_params.get('limite',None)
if categoria:
if categoria == 'ofertas':
queryset = queryset.filter(en_oferta=True)
elif categoria == 'novedades':
queryset = queryset.filter(actualizado__gte=datetime.now()-timedelta(days=how_many_days))
else:
queryset = queryset.filter(categorias__slug=categoria)
if slug:
queryset = queryset.filter(slug=slug)
if limite:
queryset = queryset[:limite]
return queryset
#from drf_haystack.viewsets import HaystackViewSet
## search is not used yet; maybe add it later
#class ProductoBusquedaView(HaystackViewSet):
#index_models = [Producto]
#serializer_class = ProductoBusquedaSerializer
class CategoriaViewsets(viewsets.ReadOnlyModelViewSet):
serializer_class = CategoriaSerializer
queryset = Categoria.objects.all()
# Views for the office (admin) side
class ProductosOficinaViewsets(viewsets.ReadOnlyModelViewSet):
serializer_class = ProductoListaSerializer
permission_classes = (IsAdminUser,)
def get_queryset(self):
queryset = Producto.objects.filter(activo=True).order_by('-pk')
return queryset
class ProductoSingleEditableViewsets(viewsets.ModelViewSet):
serializer_class = ProductoSingleEditable
permission_classes = (IsAdminUser,)
def get_queryset(self):
queryset = Producto.objects.all().order_by('-pk')
return queryset
| [
"[email protected]"
] | |
2cc2e4c133cd02104d71c53eb5e1e727fac86306 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayInsUnderwritePolicyQueryModel.py | 37b60946a99f201e4b87ce3ea9ab5c476f897e8a | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,895 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsUnderwritePolicyQueryModel(object):
def __init__(self):
self._out_biz_no = None
self._policy_no = None
self._prod_code = None
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def policy_no(self):
return self._policy_no
@policy_no.setter
def policy_no(self, value):
self._policy_no = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
def to_alipay_dict(self):
params = dict()
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.policy_no:
if hasattr(self.policy_no, 'to_alipay_dict'):
params['policy_no'] = self.policy_no.to_alipay_dict()
else:
params['policy_no'] = self.policy_no
if self.prod_code:
if hasattr(self.prod_code, 'to_alipay_dict'):
params['prod_code'] = self.prod_code.to_alipay_dict()
else:
params['prod_code'] = self.prod_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayInsUnderwritePolicyQueryModel()
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'policy_no' in d:
o.policy_no = d['policy_no']
if 'prod_code' in d:
o.prod_code = d['prod_code']
return o
| [
"[email protected]"
] | |
07ec8a97e20250841881fe935a533613ac674c22 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/prime-big-253.py | 4df2497c7f4ca82b29a6f23247f3508acd85d1c8 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # Get the n-th prime starting from 2
def get_prime(n:int) -> int:
candidate:int = 2
found:int = 0
while True:
if is_prime(candidate):
found = found + 1
if found == n:
return candidate
candidate = candidate + 1
return 0 # Never happens
def is_prime(x:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime2(x:int, x2:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
    while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime3(x:int, x2:int, x3:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime4(x:int, x2:int, x3:int, x4:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime5(x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
# Input parameter
n:int = 15
n2:int = 15
n3:int = 15
n4:int = 15
n5:int = 15
# Run [1, n]
i:int = 1
i2:int = 1
i3:int = 1
i4:int = 1
i5:int = 1
# Crunch
while i <= n:
print(get_prime(i))
i = i + 1
| [
"[email protected]"
] | |
dd95d06ea3f6f07d84040319aff47321d5d4a365 | fc0c01ffcbb20dfcdfe177f0f527bcea68bb0909 | /backend/home/migrations/0002_load_initial_data.py | f1bd274487616909656e29aa72105a5bae7c4c68 | [] | no_license | crowdbotics-apps/msm-mobile-0312145--16172 | e6736783118f61d3b928042da41c983a27011c91 | a4c5eef430966d7f3f528ff3c400cc86bb56cda0 | refs/heads/master | 2023-01-25T04:07:39.278417 | 2020-12-03T10:49:41 | 2020-12-03T10:49:41 | 318,110,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "MSM-mobile-0312145"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">MSM-mobile-0312145</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "msm-mobile-0312145--16172.botics.co"
site_params = {
"name": "MSM-mobile-0312145",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
f7c3af69717fdd9fe8134409fada9606a5081c25 | d247d27a9e48b53db2ff298467ceddc47368d963 | /forExamples/SEA1stFlwithoutContinue.py | 6f170c8b6906b58deb5e547552e15e4a0ba6423f | [] | no_license | Panzl/PythonClass2016 | 4814fa6bb3eea33248199a2985684b9eb870bbf1 | e486d149ebeba7358d7f50cf390facd403559f9e | refs/heads/master | 2021-01-11T03:40:06.468815 | 2016-10-19T13:45:16 | 2016-10-19T13:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | for iFloor in range(1,6):
for iWing in range(100,401,100):
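        # floor 1 has no rooms in the 100 and 200 wings, so skip those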
if not(iFloor==1 and iWing<300):
for iRoom in range(26):
wingRoom = iWing + iRoom
roomNumber = 'SEA ' + str(iFloor) + '.' + str(wingRoom)
print(roomNumber)
| [
"[email protected]"
] | |
25471bac763649727c460bd35d3e989530910df7 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5636311922769920_1/Python/bquark/fractiles.py | d40a361db6a8c357bb963ba9a04b429c9ff10cbb | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | import math
fi = open("D-large.in")
fo = open("D-large.out", "w")
line = next(fi)
T = int(line)
for t in range(T):
line = next(fi)
[K, C, S] = [int(x) for x in line.split(' ')]
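    # K tiles in the original pattern, fractal complexity C, S students available;
    # each checked tile traces back through C original positions, so ceil(K/C)
    # students (smin below) are enough to cover all K originals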
smin = (K + C - 1) // C
y = ['IMPOSSIBLE']
if S >= smin:
y = []
k = 0
while k < K:
yy = 0
for c in range(C):
yy += K**(C-c-1) * min(k, K-1)
k += 1
y.append(yy+1)
fo.write("Case #" + str(t+1) + ": " + ' '.join([str(s) for s in y]) + "\n")
fi.close()
fo.close()
| [
"[email protected]"
] | |
76bbf0b45b41d05234904bd18feaf90dc6401f40 | 7589cdaaf2f3eba5854028732792c1ef16172eb7 | /plot_supp_mat_figures.py | c9e5cbc7c17c578b8024edda0adefac2ae9cb253 | [
"MIT"
] | permissive | IdoSpringer/TCR-PEP-Classification | 96ea67e81c053d9a8bf68dc6dc3db557e188be3b | 1cca1551ca71359239a5f5caea7f13ec01f4982b | refs/heads/master | 2020-04-05T00:33:05.187020 | 2019-06-26T09:01:55 | 2019-06-26T09:01:55 | 156,401,275 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | import matplotlib.pyplot as plt
import compare_data_stats as cmp
from Kidera import kidera
import numpy as np
w = 'McPAS-TCR_with_V'
t = 'TCRGP_with_V'
nt = 'TCRGP_negs_with_V'
def kidera_hists(data1, data2):
factor_observations1 = [[] for i in range(10)]
with open(data1, 'r') as data:
for line in data:
line = line.split('\t')
tcr = line[0]
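            # tcr[3:-1] below drops the first three characters and the last one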
tcr = tcr[3:-1]
v = kidera.score_sequence(tcr)
v = v.values
for i in range(len(v)):
factor_observations1[i].append(v[i])
factor_observations2 = [[] for i in range(10)]
with open(data2, 'r') as data:
for line in data:
line = line.split('\t')
tcr = line[0]
tcr = tcr[3:-1]
v = kidera.score_sequence(tcr)
v = v.values
for i in range(len(v)):
factor_observations2[i].append(v[i])
fig = plt.figure()
for i in range(10):
ax = fig.add_subplot(2, 5, i+1)
a = factor_observations1[i]
b = factor_observations2[i]
weights1 = np.ones_like(a) / float(len(a))
weights2 = np.ones_like(b) / float(len(b))
bins = np.linspace(-1.0, 1.0, 10)
plot2 = ax.hist([t + 0.1 for t in b], weights=weights2, bins=[bin + 0.1 for bin in bins],
color='salmon', label='TCRGP', width=0.1)
plot1 = ax.hist(a, weights=weights1, bins=bins,
color='dodgerblue', label='McPAS', width=0.1)
ax.set_title('Kidera ' + str(i+1) + ' factor histogram')
ax.legend()
fig.tight_layout()
plt.show()
kidera_hists(w, nt)
| [
"[email protected]"
] | |
cf2437bbb631b33ae6e6807748210e78b731a4f1 | fb1d7f7dea35f992d5d7a80d2b76cb7ad12aec2f | /restart.py | 1e24de7e7333dcaa48507049b172766fde8db826 | [] | no_license | kohnakagawa/implicit_dpd | 1e418b038f7f4bc935f01e6403cca8b37334334d | 592640bd6a70193eeabd9614f86ac907c846a9d1 | refs/heads/master | 2021-01-20T20:37:03.061054 | 2017-09-04T07:25:55 | 2017-09-04T07:25:55 | 47,946,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | #!/usr/bin/env python
import glob
import os
import sys
import re
import shutil
def copy_with_log(src, dst):
print "%s -> %s" % (src, dst)
shutil.copy(src, dst)
def get_backup_number(root_dir, f_back_pattern):
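    # Return one past the highest existing ".N" backup suffix (0 when there are none).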
backup_nums = set()
for f in os.listdir(root_dir):
f_number = re.search(f_back_pattern, f)
if f_number is None:
continue
matched_num = f_number.group(3)
backup_nums.add(matched_num)
if len(backup_nums) == 0:
return 0
else:
return int(max(backup_nums)) + 1
def make_backup(root_dir):
f_back_pattern = r'(\w+)\.(\w+)\.(\d+)'
num = get_backup_number(root_dir, f_back_pattern)
for f in os.listdir(root_dir):
sim_data = re.search(f_back_pattern, f)
if sim_data is not None: # skip backup files
continue
f_back = f + "." + str(num)
f = os.path.join(root_dir, f)
f_back = os.path.join(root_dir, f_back)
copy_with_log(f, f_back)
def make_init_config(root_dir):
init_config = os.path.join(root_dir, "init_config.xyz")
fin_config = os.path.join(root_dir, "fin_config.xyz")
if os.path.getsize(fin_config) == 0:
print "WARNING! there is no trajectory data in %s" % fin_config
sys.exit(1)
copy_with_log(fin_config, init_config)
def main(argv):
if len(argv) != 2:
print "Usage: %s root_dir" % argv[0]
sys.exit(1)
root_dir = argv[1]
make_backup(root_dir)
make_init_config(root_dir)
if __name__ == "__main__":
main(sys.argv)
| [
"[email protected]"
] | |
90f88061995145befde80c2bce56e91f3b03e14b | eaac679161dfd275964575193f82d24171321f43 | /setup.py | 654e245dd8752b93d1cecf50ae82bbfcf36ad6da | [
"MIT"
] | permissive | aagallag/pubg-python | 0ff3816cfcbeb185cc6a83ab04934bacd425badf | 42d1a16d332ee53d5ebe136293bfcd65d34a4da6 | refs/heads/master | 2020-03-11T13:11:13.745847 | 2018-04-16T15:08:22 | 2018-04-16T15:08:22 | 130,018,032 | 0 | 0 | null | 2018-04-18T06:58:26 | 2018-04-18T06:58:25 | null | UTF-8 | Python | false | false | 858 | py | from setuptools import (
find_packages,
setup)
setup(
name='pubg-python',
version='0.2.8',
description='A python wrapper for the PUBG developer API',
url='https://github.com/ramonsaraiva/pubg-python',
author='Ramon Saraiva',
author_email='[email protected]',
license='MIT',
packages=find_packages(exclude=('tests*',)),
install_requires=[
'requests>=2.18.4',
'furl>=1.0.1',
],
extras_require={
":python_version<='3.4'": ['enum34>=1.1.6'],
},
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
],
)
| [
"[email protected]"
] | |
cd3b531914b0f7e87c1fdf8e76c6404c03ae150d | 9bf522a1716339fe928e83c9b416eeebaa1421af | /aiida_lammps/calculations/lammps/force.py | 11f3143a6fb0dfaa6cc6ed3da3a21f84ab3c9b05 | [
"MIT"
] | permissive | zaidurrehman/aiida-lammps | 132ccf6f6bc2b8e2a81fa3f852a76c8bd3bdcedd | e00d5501778c918b4333747398d4ae4df46fd3eb | refs/heads/master | 2020-03-22T16:07:52.265272 | 2018-05-15T08:26:39 | 2018-05-15T08:26:39 | 140,304,946 | 0 | 0 | MIT | 2018-07-09T15:31:32 | 2018-07-09T15:25:24 | Python | UTF-8 | Python | false | false | 2,367 | py | from aiida.orm.calculation.job import JobCalculation
from aiida.common.exceptions import InputValidationError
from aiida.common.datastructures import CalcInfo, CodeInfo
from aiida.common.utils import classproperty
from aiida.orm import DataFactory
from aiida_lammps.calculations.lammps import BaseLammpsCalculation
from aiida_lammps.calculations.lammps.potentials import LammpsPotential
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
def generate_LAMMPS_input(parameters_data,
potential_obj,
structure_file='data.gan',
trajectory_file='trajectory.lammpstr'):
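    """Build a single-point (run 0) LAMMPS input that dumps the per-atom forces."""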
names_str = ' '.join(potential_obj._names)
lammps_input_file = 'units metal\n'
lammps_input_file += 'boundary p p p\n'
lammps_input_file += 'box tilt large\n'
lammps_input_file += 'atom_style atomic\n'
lammps_input_file += 'read_data {}\n'.format(structure_file)
lammps_input_file += potential_obj.get_input_potential_lines()
lammps_input_file += 'neighbor 0.3 bin\n'
lammps_input_file += 'neigh_modify every 1 delay 0 check no\n'
lammps_input_file += 'dump aiida all custom 1 {0} element fx fy fz\n'.format(trajectory_file)
lammps_input_file += 'dump_modify aiida format line "%4s %16.10f %16.10f %16.10f"\n'
lammps_input_file += 'dump_modify aiida sort id\n'
lammps_input_file += 'dump_modify aiida element {}\n'.format(names_str)
lammps_input_file += 'run 0'
return lammps_input_file
class ForceCalculation(BaseLammpsCalculation, JobCalculation):
_OUTPUT_TRAJECTORY_FILE_NAME = 'trajectory.lammpstrj'
_OUTPUT_FILE_NAME = 'log.lammps'
def _init_internal_params(self):
super(ForceCalculation, self)._init_internal_params()
self._default_parser = 'lammps.force'
self.__retrieve_list = []
self._generate_input_function = generate_LAMMPS_input
self._retrieve_list = [self._OUTPUT_TRAJECTORY_FILE_NAME, self._OUTPUT_FILE_NAME]
@classproperty
def _use_methods(cls):
"""
Extend the parent _use_methods with further keys.
"""
retdict = JobCalculation._use_methods
retdict.update(BaseLammpsCalculation._baseclass_use_methods)
return retdict
| [
"[email protected]"
] | |
5bb3365c6f397cdc145999193c20452bf0d67692 | 3cf0d750948a758d5771dd778fbb783d64a044ae | /src/basic/web/flask/01hello/do_flask.py | 7765c119d4516759757f524df238b1c5042fa794 | [
"CC-BY-NC-SA-4.0",
"Apache-2.0"
] | permissive | hbulpf/pydemo | 6552a08b3c85721ac1b2ba335b030e234ad03b6c | ea3e9f9086116a86ecef803e9e3179a34c94c20f | refs/heads/master | 2022-11-30T21:06:29.933820 | 2022-01-15T17:05:16 | 2022-01-15T17:05:16 | 237,584,300 | 6 | 1 | Apache-2.0 | 2022-11-22T09:49:38 | 2020-02-01T08:20:43 | Python | UTF-8 | Python | false | false | 795 | py | from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
return '<h1>Home</h1>'
@app.route('/signin', methods=['GET'])
def signin_form():
return '''<form action="/signin" method="post">
<p>username:<input name="username"></p>
<p>password:<input name="password" type="password"></p>
<p><button type="submit">Sign In</button></p>
</form>'''
@app.route('/signin', methods=['POST'])
def signin():
    # read the submitted form fields from the request object:
if request.form['username']=='admin' and request.form['password']=='password':
return '<h3>Hello, admin!</h3>'
return '<h3>Bad username or password.</h3>'
if __name__ == '__main__':
app.run() | [
"[email protected]"
] | |
f26a95fe54f735144ba448059a58f6002ca39c7d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ZF6vZwPc5He5u5EFe_3.py | 5eebd4882cfb06abfccf72ab693296cfd70bfd75 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py |
def is_first_superior(l1, l2):
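    # True when some element of l1 strictly exceeds the element of l2 at the same index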
for i in range(len(l1)):
if l1[i] > l2[i]:
return True
return False
| [
"[email protected]"
] | |
54bec16a1e8a091fb14b3314055c0bfc1ade59c2 | 08cfe7ccf78f098924efdcf0db72f32d56e995fe | /envosx/bin/pip | d805a53a4465a5e6982e53bfb89d17841e9f7151 | [] | no_license | carloxdev/veritas | 6709031a020801181dc81751133433adc96dfb71 | e91e42545b7c42b8fd5090f58572715c3f653095 | refs/heads/master | 2020-04-21T23:31:32.031949 | 2019-02-10T06:25:27 | 2019-02-10T06:25:27 | 169,949,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | #!/Users/carloxdev/Files/Trabajo/Sintaxyz/Proyectos/Veritas/envosx/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
a074006476a8da1ae12af9f69f15ca373f71bc50 | ded46c3a86c2a70328a63d779ac038d636ae5906 | /_WSpython/Python01_11_pass01_최임정.py | db594047605bfbd23f11e50803e515dbfa609ffa | [] | no_license | imjoung/hongik_univ | 82d0e7ea31763713f51bbde9d45e4aae5cb73849 | 82a3a77605d74d13eb76b915b215f6e245968180 | refs/heads/main | 2023-06-24T12:49:46.087083 | 2021-07-15T06:31:57 | 2021-07-15T06:31:57 | 379,128,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | vId = "Red"
Pwd = "1234"
if vId == "Red" and Pwd == "1234":
pass
else:
pass | [
"[email protected]"
] | |
9d69eb0935be8ecd1893a4b8bbbb712dbb6b2c3f | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_exp/trend_linear/cycle_12/ar_/test_artificial_1024_exp_linear_12__0.py | d3250a459fa582199ac7047cea2b5bd10721479e | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 1024 , FREQ = 'D', seed = 0, trendtype = "linear", cycle_length = 12, transform = "exp", sigma = 0.0, exog_count = 0, ar_order = 0);
art.process_dataset(dataset); | [
"[email protected]"
] | |
4abc683ed81e9286e0ff9350feee1b386ac7ffe5 | 4cbf572b446af438249911e2b07ae873234609df | /examples/postman_echo/request_methods/hardcode_test.py | e60254c428b1d0552596a765052181cb6c195cab | [
"Apache-2.0"
] | permissive | jeremy8250/httprunner | 0a1d164c18df43bf65754130615bab8a91b14862 | a40c7892f3666dd1de200e53ecd5cee9fa8a68ee | refs/heads/master | 2022-07-04T08:20:01.979326 | 2020-05-18T02:47:56 | 2020-05-18T02:47:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | # NOTICE: Generated By HttpRunner. DO'NOT EDIT!
# FROM: examples/postman_echo/request_methods/hardcode.yml
from httprunner import HttpRunner, TConfig, TStep
class TestCaseHardcode(HttpRunner):
config = TConfig(
**{
"name": "request methods testcase in hardcode",
"base_url": "https://postman-echo.com",
"verify": False,
"path": "examples/postman_echo/request_methods/hardcode_test.py",
}
)
teststeps = [
TStep(
**{
"name": "get with params",
"request": {
"method": "GET",
"url": "/get",
"params": {"foo1": "bar1", "foo2": "bar2"},
"headers": {"User-Agent": "HttpRunner/3.0"},
},
"validate": [{"eq": ["status_code", 200]}],
}
),
TStep(
**{
"name": "post raw text",
"request": {
"method": "POST",
"url": "/post",
"headers": {
"User-Agent": "HttpRunner/3.0",
"Content-Type": "text/plain",
},
"data": "This is expected to be sent back as part of response body.",
},
"validate": [{"eq": ["status_code", 200]}],
}
),
TStep(
**{
"name": "post form data",
"request": {
"method": "POST",
"url": "/post",
"headers": {
"User-Agent": "HttpRunner/3.0",
"Content-Type": "application/x-www-form-urlencoded",
},
"data": "foo1=bar1&foo2=bar2",
},
"validate": [{"eq": ["status_code", 200]}],
}
),
TStep(
**{
"name": "put request",
"request": {
"method": "PUT",
"url": "/put",
"headers": {
"User-Agent": "HttpRunner/3.0",
"Content-Type": "text/plain",
},
"data": "This is expected to be sent back as part of response body.",
},
"validate": [{"eq": ["status_code", 200]}],
}
),
]
if __name__ == "__main__":
TestCaseHardcode().test_start()
| [
"[email protected]"
] | |
2c7f7315aae320bc1caeb77516b600a04ca3a90f | ace30d0a4b1452171123c46eb0f917e106a70225 | /filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/keystone/conf/extra_headers.py | 247d879764f82dcec291593525b0478080abbdef | [
"Python-2.0"
] | permissive | juancarlosdiaztorres/Ansible-OpenStack | e98aa8c1c59b0c0040c05df292964520dd796f71 | c01951b33e278de9e769c2d0609c0be61d2cb26b | refs/heads/master | 2022-11-21T18:08:21.948330 | 2018-10-15T11:39:20 | 2018-10-15T11:39:20 | 152,568,204 | 0 | 3 | null | 2022-11-19T17:38:49 | 2018-10-11T09:45:48 | Python | UTF-8 | Python | false | false | 960 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from keystone.conf import utils
distribution = cfg.StrOpt(
'Distribution',
default='Ubuntu',
help=utils.fmt("""
Specifies the distribution of the keystone server.
"""))
GROUP_NAME = __name__.split('.')[-1]
ALL_OPTS = [
distribution,
]
def register_opts(conf):
conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
return {GROUP_NAME: ALL_OPTS}
| [
"[email protected]"
] | |
2d0b99e0bd8d4068049d46c68973cd8748ee3dd6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_drove.py | 1f3fa8910c98a175c0f442468e208abbc398f454 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py |
#calss header
class _DROVE():
def __init__(self,):
self.name = "DROVE"
self.definitions = [u'to move farm animals on foot from one place to another']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
7832c84edda98ed4bb859c2ab6f18914aa2b921f | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/stacksNqueues_20200722083944.py | ba6752c16a92b3ce035e1d4a9eb516d58206529d | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | # we'll use a list to rep stack and a queue
# empty list
stack = []
stack.append(1) | [
"[email protected]"
] | |
b8beb75f1384cd86e0be5cf1fca0bff678731ddc | b4fc645746dd2a88a951acea06db91eae1d0eda4 | /fluent_blogs/__init__.py | e531338f42cc1e7e73c6966faff30d43d6e2196f | [
"Apache-2.0"
] | permissive | nishchintg01/django-fluent-blogs | 40a2a5c25f0afbdbb08af14852af5a128e564e75 | 86b148549a010eaca9a2ea987fe43be250e06c50 | refs/heads/master | 2020-06-13T16:45:11.847957 | 2018-07-31T10:52:10 | 2018-07-31T10:52:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | # following PEP 440
__version__ = "2.0.3"
# Fix for internal messy imports.
# When base_models is imported before models/__init__.py runs, there is a circular import:
# base_models -> models/managers.py -> invoking models/__init__.py -> models/db.py -> base_models.py
#
# This doesn't occur when the models are imported first.
| [
"[email protected]"
] | |
50a51ca5d16e357fec283b6b5598b557ab490c41 | 63044bff27a0cf40ae9bd203e12562386b0fc925 | /courses/management/commands/update.py | b323a77606477d99af35698445c40cbe516b99fb | [
"BSD-3-Clause"
] | permissive | afg-archive/nthucourses | 696f208b57b3f870fdae6a87030804fb70cda080 | 9f28f8e9480b9d7a9db1f9c023955fb23b1a28aa | refs/heads/master | 2021-05-29T01:46:36.882086 | 2015-05-29T10:51:13 | 2015-05-29T10:51:13 | 36,482,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from django.core.management.base import BaseCommand
import sys
from courses import adapter
from logs.models import Logger
class Command(BaseCommand):
help = 'update stuff'
def add_arguments(self, parser):
parser.add_argument('count', type=int)
def handle(self, *args, **options):
with Logger(' '.join(sys.argv[1:])):
adapter.update_n(options['count'])
| [
"[email protected]"
] | |
3b9a978a922a5b196bc117898814b0cbec445d6c | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20180118/example_pandas/x.py | b249ecd7574e9204bf7e358494db3c0a1f66cdf8 | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 173 | py | import pandas as pd
ax = df.plot(kind="scatter", x="x", y="y", s=40)
for _, row in df.iterrows():
print(row.name, row.x, row.y)
ax.annotate(row.name, row.x, row.y)
| [
"[email protected]"
] | |
13f7f8b2ae731cf1aa60c04f77a6859b01c2e133 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2019_10_17/operations/_private_link_resources_operations.py | 9c81b625304ad9d1ab87cc18e3bfe196390cf5e3 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 12,331 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_private_link_scope_request(
resource_group_name: str, scope_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-10-17-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateLinkResources",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"scopeName": _SERIALIZER.url("scope_name", scope_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, scope_name: str, group_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-10-17-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateLinkResources/{groupName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"scopeName": _SERIALIZER.url("scope_name", scope_name, "str"),
"groupName": _SERIALIZER.url("group_name", group_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class PrivateLinkResourcesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.monitor.v2019_10_17.MonitorManagementClient`'s
:attr:`private_link_resources` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_private_link_scope(
self, resource_group_name: str, scope_name: str, **kwargs: Any
) -> Iterable["_models.PrivateLinkResource"]:
"""Gets the private link resources that need to be created for a Azure Monitor PrivateLinkScope.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param scope_name: The name of the Azure Monitor PrivateLinkScope resource. Required.
:type scope_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkResource or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.monitor.v2019_10_17.models.PrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-10-17-preview"))
cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_private_link_scope_request(
resource_group_name=resource_group_name,
scope_name=scope_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_private_link_scope.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_private_link_scope.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateLinkResources"
}
@distributed_trace
def get(
self, resource_group_name: str, scope_name: str, group_name: str, **kwargs: Any
) -> _models.PrivateLinkResource:
"""Gets the private link resources that need to be created for a Azure Monitor PrivateLinkScope.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param scope_name: The name of the Azure Monitor PrivateLinkScope resource. Required.
:type scope_name: str
:param group_name: The name of the private link resource. Required.
:type group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResource or the result of cls(response)
:rtype: ~azure.mgmt.monitor.v2019_10_17.models.PrivateLinkResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-10-17-preview"))
cls: ClsType[_models.PrivateLinkResource] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
scope_name=scope_name,
group_name=group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateLinkResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateLinkResources/{groupName}"
}
| [
"[email protected]"
] | |
edb1069fb26add639ea2a6658e393ab7ecaf60b5 | 94a12a005e01be982f3f5482f03c429b7710dd1c | /week-1/mongoProc_linux_x86_64/bin/pypy/lib-python/2.7/test/test_bytes.py | 2f219f562305ef8114406028e77ff83d358f844f | [
"LicenseRef-scancode-unicode",
"MIT",
"Apache-2.0"
] | permissive | RaviTezu/M202 | 9b80134817c7abf706a70968fbf9ced63f56ba80 | 62e7f19e3587cb2b8e62d3f8751a63f886f4294b | refs/heads/master | 2021-01-18T21:48:47.925607 | 2016-04-23T15:52:29 | 2016-04-23T15:52:29 | 19,467,383 | 6 | 9 | null | null | null | null | UTF-8 | Python | false | false | 43,974 | py | """Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.test_support
import test.string_tests
import test.buffer_tests
if sys.flags.bytes_warning:
def check_bytes_warnings(func):
@functools.wraps(func)
def wrapper(*args, **kw):
with test.test_support.check_warnings(('', BytesWarning)):
return func(*args, **kw)
return wrapper
else:
# no-op
def check_bytes_warnings(func):
return func
class Indexable:
def __init__(self, value=0):
self.value = value
def __index__(self):
return self.value
class BaseBytesTest(unittest.TestCase):
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxint])
self.assertRaises(IndexError, lambda: b[sys.maxint+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxint])
self.assertRaises(IndexError, lambda: b[-sys.maxint-1])
self.assertRaises(IndexError, lambda: b[-sys.maxint-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
# allowed in 2.x
#self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxint])
self.assertRaises(ValueError, self.type2test, [sys.maxint+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
@check_bytes_warnings
def test_compare_to_str(self):
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character sizes
self.assertEqual(self.type2test(b"\0a\0b\0c") == u"abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == u"abc", False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == u"abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == u"abc", False)
self.assertEqual(self.type2test() == unicode(), False)
self.assertEqual(self.type2test() != unicode(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
def test_encoding(self):
sample = u"Hello world\n\u1234\u5678\u9abc\udef0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b, self.type2test(sample.encode(enc)))
self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin1")
b = self.type2test(sample, "latin1", "ignore")
self.assertEqual(b, self.type2test(sample[:-4], "utf-8"))
def test_decode(self):
sample = u"Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = u"Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
self.assertEqual(b.decode(errors="ignore", encoding="utf8"),
"Hello world\n")
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + u"def")
self.assertRaises(TypeError, lambda: u"abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
self.assertRaises((OverflowError, MemoryError),
lambda: b * sys.maxsize)
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: u"a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEqual(self.type2test.fromhex(u''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30])
self.assertEqual(self.type2test.fromhex(u'1a2B30'), b)
self.assertEqual(self.type2test.fromhex(u' 1A 2B 30 '), b)
self.assertEqual(self.type2test.fromhex(u'0000'), b'\0\0')
self.assertRaises(ValueError, self.type2test.fromhex, u'a')
self.assertRaises(ValueError, self.type2test.fromhex, u'rt')
self.assertRaises(ValueError, self.type2test.fromhex, u'1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, u'\x00')
self.assertRaises(ValueError, self.type2test.fromhex, u'12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
# XXX more...
def test_count(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
def test_find(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
def test_rfind(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
def test_index(self):
b = self.type2test(b'world')
self.assertEqual(b.index(b'w'), 0)
self.assertEqual(b.index(b'orl'), 1)
self.assertRaises(ValueError, b.index, b'worm')
self.assertRaises(ValueError, b.index, b'ldo')
def test_rindex(self):
# XXX could be more rigorous
b = self.type2test(b'world')
self.assertEqual(b.rindex(b'w'), 0)
self.assertEqual(b.rindex(b'orl'), 1)
self.assertRaises(ValueError, b.rindex, b'worm')
self.assertRaises(ValueError, b.rindex, b'ldo')
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
self.assertEqual(b.split(), [b])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, u' ')
def test_split_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, u' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
b = self.type2test(b)
ps = pickle.dumps(b, proto)
q = pickle.loads(ps)
self.assertEqual(b, q)
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, u'b')
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_none_arguments(self):
# issue 11828
b = self.type2test(b'hello')
l = self.type2test(b'l')
h = self.type2test(b'h')
x = self.type2test(b'x')
o = self.type2test(b'o')
self.assertEqual(2, b.find(l, None))
self.assertEqual(3, b.find(l, -2, None))
self.assertEqual(2, b.find(l, None, -2))
self.assertEqual(0, b.find(h, None, None))
self.assertEqual(3, b.rfind(l, None))
self.assertEqual(3, b.rfind(l, -2, None))
self.assertEqual(2, b.rfind(l, None, -2))
self.assertEqual(0, b.rfind(h, None, None))
self.assertEqual(2, b.index(l, None))
self.assertEqual(3, b.index(l, -2, None))
self.assertEqual(2, b.index(l, None, -2))
self.assertEqual(0, b.index(h, None, None))
self.assertEqual(3, b.rindex(l, None))
self.assertEqual(3, b.rindex(l, -2, None))
self.assertEqual(2, b.rindex(l, None, -2))
self.assertEqual(0, b.rindex(h, None, None))
self.assertEqual(2, b.count(l, None))
self.assertEqual(1, b.count(l, -2, None))
self.assertEqual(1, b.count(l, None, -2))
self.assertEqual(0, b.count(x, None, None))
self.assertEqual(True, b.endswith(o, None))
self.assertEqual(True, b.endswith(o, -2, None))
self.assertEqual(True, b.endswith(l, None, -2))
self.assertEqual(False, b.endswith(x, None, None))
self.assertEqual(True, b.startswith(h, None))
self.assertEqual(True, b.startswith(l, -2, None))
self.assertEqual(True, b.startswith(h, None, -2))
self.assertEqual(False, b.startswith(x, None, None))
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
b = self.type2test(b'hello')
x = self.type2test(b'x')
self.assertRaisesRegexp(TypeError, r'\bfind\b', b.find,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brfind\b', b.rfind,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bindex\b', b.index,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brindex\b', b.rindex,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bcount\b', b.count,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bstartswith\b', b.startswith,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bendswith\b', b.endswith,
x, None, None, None)
class ByteArrayTest(BaseBytesTest):
type2test = bytearray
def test_nohash(self):
self.assertRaises(TypeError, hash, bytearray())
def test_bytearray_api(self):
short_sample = b"Hello world\n"
sample = short_sample + b"\0"*(20 - len(short_sample))
tfn = tempfile.mktemp()
try:
# Prepare
with open(tfn, "wb") as f:
f.write(short_sample)
# Test readinto
with open(tfn, "rb") as f:
b = bytearray(20)
n = f.readinto(b)
self.assertEqual(n, len(short_sample))
# Python 2.x
b_sample = (ord(s) for s in sample)
self.assertEqual(list(b), list(b_sample))
# Test writing in binary mode
with open(tfn, "wb") as f:
f.write(b)
with open(tfn, "rb") as f:
self.assertEqual(f.read(), sample)
# Text mode is ambiguous; don't test
finally:
try:
os.remove(tfn)
except os.error:
pass
def test_reverse(self):
b = bytearray(b'hello')
self.assertEqual(b.reverse(), None)
self.assertEqual(b, b'olleh')
b = bytearray(b'hello1') # test even number of items
b.reverse()
self.assertEqual(b, b'1olleh')
b = bytearray()
b.reverse()
self.assertFalse(b)
def test_regexps(self):
def by(s):
return bytearray(map(ord, s))
b = by("Hello, world")
self.assertEqual(re.findall(r"\w+", b), [by("Hello"), by("world")])
def test_setitem(self):
b = bytearray([1, 2, 3])
b[1] = 100
self.assertEqual(b, bytearray([1, 100, 3]))
b[-1] = 200
self.assertEqual(b, bytearray([1, 100, 200]))
b[0] = Indexable(10)
self.assertEqual(b, bytearray([10, 100, 200]))
try:
b[3] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[-10] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[0] = 256
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = Indexable(-1)
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = None
self.fail("Didn't raise TypeError")
except TypeError:
pass
def test_delitem(self):
b = bytearray(range(10))
del b[0]
self.assertEqual(b, bytearray(range(1, 10)))
del b[-1]
self.assertEqual(b, bytearray(range(1, 9)))
del b[4]
self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))
def test_setslice(self):
b = bytearray(range(10))
self.assertEqual(list(b), list(range(10)))
b[0:5] = bytearray([1, 1, 1, 1, 1])
self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
del b[0:-5]
self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
b[0:0] = bytearray([0, 1, 2, 3, 4])
self.assertEqual(b, bytearray(range(10)))
b[-7:-3] = bytearray([100, 101])
self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
b[3:5] = [3, 4, 5, 6]
self.assertEqual(b, bytearray(range(10)))
b[3:0] = [42, 42, 42]
self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
L = list(range(255))
b = bytearray(L)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
b[start:stop:step] = data
self.assertEqual(b, bytearray(L))
del L[start:stop:step]
del b[start:stop:step]
self.assertEqual(b, bytearray(L))
def test_setslice_trap(self):
# This test verifies that we correctly handle assigning self
# to a slice of self (the old Lambert Meertens trap).
b = bytearray(range(256))
b[8:] = b
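        # the source must effectively be copied before the write: the first
        # 8 bytes survive, followed by the full original 0..255 sequence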
self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))
def test_iconcat(self):
b = bytearray(b"abc")
b1 = b
b += b"def"
self.assertEqual(b, b"abcdef")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
b += b"xyz"
self.assertEqual(b, b"abcdefxyz")
try:
b += u""
except TypeError:
pass
else:
self.fail("bytes += unicode didn't raise TypeError")
def test_irepeat(self):
b = bytearray(b"abc")
b1 = b
b *= 3
self.assertEqual(b, b"abcabcabc")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_irepeat_1char(self):
b = bytearray(b"x")
b1 = b
b *= 100
self.assertEqual(b, b"x"*100)
self.assertEqual(b, b1)
self.assertTrue(b is b1)
@test.test_support.impl_detail("undocumented bytes.__alloc__()")
def test_alloc(self):
b = bytearray()
alloc = b.__alloc__()
self.assertTrue(alloc >= 0)
seq = [alloc]
for i in range(100):
b += b"x"
alloc = b.__alloc__()
self.assertTrue(alloc >= len(b))
if alloc not in seq:
seq.append(alloc)
def test_extend(self):
orig = b'hello'
a = bytearray(orig)
a.extend(a)
self.assertEqual(a, orig + orig)
self.assertEqual(a[5:], orig)
a = bytearray(b'')
# Test iterators that don't have a __length_hint__
a.extend(map(ord, orig * 25))
a.extend(ord(x) for x in orig * 25)
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(iter(map(ord, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(list(map(ord, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
self.assertEqual(len(a), 0)
a = bytearray(b'')
a.extend([Indexable(ord('a'))])
self.assertEqual(a, b'a')
def test_remove(self):
b = bytearray(b'hello')
b.remove(ord('l'))
self.assertEqual(b, b'helo')
b.remove(ord('l'))
self.assertEqual(b, b'heo')
self.assertRaises(ValueError, lambda: b.remove(ord('l')))
self.assertRaises(ValueError, lambda: b.remove(400))
self.assertRaises(TypeError, lambda: b.remove(u'e'))
# remove first and last
b.remove(ord('o'))
b.remove(ord('h'))
self.assertEqual(b, b'e')
self.assertRaises(TypeError, lambda: b.remove(u'e'))
b.remove(Indexable(ord('e')))
self.assertEqual(b, b'')
def test_pop(self):
b = bytearray(b'world')
self.assertEqual(b.pop(), ord('d'))
self.assertEqual(b.pop(0), ord('w'))
self.assertEqual(b.pop(-2), ord('r'))
self.assertRaises(IndexError, lambda: b.pop(10))
self.assertRaises(IndexError, lambda: bytearray().pop())
# test for issue #6846
self.assertEqual(bytearray(b'\xff').pop(), 0xff)
def test_nosort(self):
self.assertRaises(AttributeError, lambda: bytearray().sort())
def test_append(self):
b = bytearray(b'hell')
b.append(ord('o'))
self.assertEqual(b, b'hello')
self.assertEqual(b.append(100), None)
b = bytearray()
b.append(ord('A'))
self.assertEqual(len(b), 1)
self.assertRaises(TypeError, lambda: b.append(u'o'))
b = bytearray()
b.append(Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_insert(self):
b = bytearray(b'msssspp')
b.insert(1, ord('i'))
b.insert(4, ord('i'))
b.insert(-2, ord('i'))
b.insert(1000, ord('i'))
self.assertEqual(b, b'mississippi')
# allowed in 2.x
#self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
b = bytearray()
b.insert(0, Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_copied(self):
# Issue 4348. Make sure that operations that don't mutate the array
# copy the bytes.
b = bytearray(b'abc')
self.assertFalse(b is b.replace(b'abc', b'cde', 0))
t = bytearray([i for i in range(256)])
x = bytearray(b'')
self.assertFalse(x is x.translate(t))
def test_partition_bytearray_doesnt_share_nullstring(self):
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
# Same for rpartition
b, c, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
c, b, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
@test.test_support.impl_detail(
"resizing semantics of CPython rely on refcounting")
def test_resize_forbidden(self):
# #4509: can't resize a bytearray when there are buffer exports, even
# if it wouldn't reallocate the underlying buffer.
# Furthermore, no destructive changes to the buffer may be applied
# before raising the error.
b = bytearray(range(10))
v = memoryview(b)
def resize(n):
b[1:-1] = range(n + 1, 2*n - 1)
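            # writes n-2 bytes into the 8-byte interior slice, so only
            # n == 10 leaves the length (and thus the buffer) unchanged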
resize(10)
orig = b[:]
self.assertRaises(BufferError, resize, 11)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 9)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 0)
self.assertEqual(b, orig)
# Other operations implying resize
self.assertRaises(BufferError, b.pop, 0)
self.assertEqual(b, orig)
self.assertRaises(BufferError, b.remove, b[1])
self.assertEqual(b, orig)
def delitem():
del b[1]
self.assertRaises(BufferError, delitem)
self.assertEqual(b, orig)
# deleting a non-contiguous slice
def delslice():
b[1:-1:2] = b""
self.assertRaises(BufferError, delslice)
self.assertEqual(b, orig)
@test.test_support.impl_detail("resizing semantics", cpython=False)
def test_resize_forbidden_non_cpython(self):
# on non-CPython implementations, we cannot prevent changes to
# bytearrays just because there are buffers around. Instead,
# we get (on PyPy) a buffer that follows the changes and resizes.
b = bytearray(range(10))
for v in [memoryview(b), buffer(b)]:
b[5] = 99
self.assertIn(v[5], (99, chr(99)))
b[5] = 100
b += b
b += b
b += b
self.assertEquals(len(v), 80)
self.assertIn(v[5], (100, chr(100)))
self.assertIn(v[79], (9, chr(9)))
del b[10:]
self.assertRaises(IndexError, lambda: v[10])
self.assertEquals(len(v), 10)
def test_empty_bytearray(self):
# Issue #7561: operations on empty bytearrays could crash in many
# situations, due to a fragile implementation of the
# PyByteArray_AS_STRING() C macro.
self.assertRaises(ValueError, int, bytearray(b''))
class AssortedBytesTest(unittest.TestCase):
#
# Test various combinations of bytes and bytearray
#
@check_bytes_warnings
def test_repr_str(self):
for f in str, repr:
self.assertEqual(f(bytearray()), "bytearray(b'')")
self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
self.assertEqual(f(bytearray([0, 1, 254, 255])),
"bytearray(b'\\x00\\x01\\xfe\\xff')")
self.assertEqual(f(b"abc"), "b'abc'")
self.assertEqual(f(b"'"), '''b"'"''') # '''
self.assertEqual(f(b"'\""), r"""b'\'"'""") # '
def test_compare_bytes_to_bytearray(self):
self.assertEqual(b"abc" == bytes(b"abc"), True)
self.assertEqual(b"ab" != bytes(b"abc"), True)
self.assertEqual(b"ab" <= bytes(b"abc"), True)
self.assertEqual(b"ab" < bytes(b"abc"), True)
self.assertEqual(b"abc" >= bytes(b"ab"), True)
self.assertEqual(b"abc" > bytes(b"ab"), True)
self.assertEqual(b"abc" != bytes(b"abc"), False)
self.assertEqual(b"ab" == bytes(b"abc"), False)
self.assertEqual(b"ab" > bytes(b"abc"), False)
self.assertEqual(b"ab" >= bytes(b"abc"), False)
self.assertEqual(b"abc" < bytes(b"ab"), False)
self.assertEqual(b"abc" <= bytes(b"ab"), False)
self.assertEqual(bytes(b"abc") == b"abc", True)
self.assertEqual(bytes(b"ab") != b"abc", True)
self.assertEqual(bytes(b"ab") <= b"abc", True)
self.assertEqual(bytes(b"ab") < b"abc", True)
self.assertEqual(bytes(b"abc") >= b"ab", True)
self.assertEqual(bytes(b"abc") > b"ab", True)
self.assertEqual(bytes(b"abc") != b"abc", False)
self.assertEqual(bytes(b"ab") == b"abc", False)
self.assertEqual(bytes(b"ab") > b"abc", False)
self.assertEqual(bytes(b"ab") >= b"abc", False)
self.assertEqual(bytes(b"abc") < b"ab", False)
self.assertEqual(bytes(b"abc") <= b"ab", False)
def test_doc(self):
self.assertIsNotNone(bytearray.__doc__)
self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
self.assertIsNotNone(bytes.__doc__)
self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)
def test_from_bytearray(self):
sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
buf = memoryview(sample)
b = bytearray(buf)
self.assertEqual(b, bytearray(sample))
@check_bytes_warnings
def test_to_str(self):
self.assertEqual(str(b''), "b''")
self.assertEqual(str(b'x'), "b'x'")
self.assertEqual(str(b'\x80'), "b'\\x80'")
self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")
def test_literal(self):
tests = [
(b"Wonderful spam", "Wonderful spam"),
(br"Wonderful spam too", "Wonderful spam too"),
(b"\xaa\x00\000\200", "\xaa\x00\000\200"),
(br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
]
for b, s in tests:
self.assertEqual(b, bytearray(s, 'latin-1'))
for c in range(128, 256):
self.assertRaises(SyntaxError, eval,
'b"%s"' % chr(c))
def test_translate(self):
b = b'hello'
ba = bytearray(b)
rosetta = bytearray(range(0, 256))
rosetta[ord('o')] = ord('e')
c = b.translate(rosetta, b'l')
self.assertEqual(b, b'hello')
self.assertEqual(c, b'hee')
c = ba.translate(rosetta, b'l')
self.assertEqual(ba, b'hello')
self.assertEqual(c, b'hee')
c = b.translate(None, b'e')
self.assertEqual(c, b'hllo')
c = ba.translate(None, b'e')
self.assertEqual(c, b'hllo')
self.assertRaises(TypeError, b.translate, None, None)
self.assertRaises(TypeError, ba.translate, None, None)
def test_split_bytearray(self):
self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])
def test_rsplit_bytearray(self):
self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
test.buffer_tests.MixinBytesBufferCommonTests):
def marshal(self, x):
return bytearray(x)
def test_returns_new_copy(self):
val = self.marshal(b'1234')
# On immutable types these MAY return a reference to themselves
# but on mutable types like bytearray they MUST return a new copy.
for methname in ('zfill', 'rjust', 'ljust', 'center'):
method = getattr(val, methname)
newval = method(3)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
methname+' returned self on a mutable object')
for expr in ('val.split()[0]', 'val.rsplit()[0]',
'val.partition(".")[0]', 'val.rpartition(".")[2]',
'val.splitlines()[0]', 'val.replace("", "")'):
newval = eval(expr)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
expr+' returned val on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
def fixtype(self, obj):
if isinstance(obj, str):
return obj.encode("utf-8")
return super(FixedStringTest, self).fixtype(obj)
    # Currently the bytes containment testing uses a single integer
    # value. This may not be the final design, but until then testing
    # for a bytes section within a bytes object is not valid here.
def test_contains(self):
pass
def test_expandtabs(self):
pass
def test_upper(self):
pass
def test_lower(self):
pass
def test_hash(self):
# XXX check this out
pass
class ByteArrayAsStringTest(FixedStringTest):
type2test = bytearray
class ByteArraySubclass(bytearray):
pass
class ByteArraySubclassTest(unittest.TestCase):
def test_basic(self):
self.assertTrue(issubclass(ByteArraySubclass, bytearray))
self.assertIsInstance(ByteArraySubclass(), bytearray)
a, b = b"abcd", b"efgh"
_a, _b = ByteArraySubclass(a), ByteArraySubclass(b)
# test comparison operators with subclass instances
self.assertTrue(_a == _a)
self.assertTrue(_a != _b)
self.assertTrue(_a < _b)
self.assertTrue(_a <= _b)
self.assertTrue(_b >= _a)
self.assertTrue(_b > _a)
self.assertTrue(_a is not a)
# test concat of subclass instances
self.assertEqual(a + b, _a + _b)
self.assertEqual(a + b, a + _b)
self.assertEqual(a + b, _a + b)
# test repeat
self.assertTrue(a*5 == _a*5)
def test_join(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
s1 = ByteArraySubclass(b"abcd")
s2 = bytearray().join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is bytearray, type(s2))
# Test reverse, calling join on subclass
s3 = s1.join([b"abcd"])
self.assertTrue(type(s3) is bytearray)
def test_pickle(self):
a = ByteArraySubclass(b"abcd")
a.x = 10
a.y = ByteArraySubclass(b"efgh")
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
b = pickle.loads(pickle.dumps(a, proto))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_copy(self):
a = ByteArraySubclass(b"abcd")
a.x = 10
a.y = ByteArraySubclass(b"efgh")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_init_override(self):
class subclass(bytearray):
def __init__(self, newarg=1, *args, **kwargs):
bytearray.__init__(self, *args, **kwargs)
x = subclass(4, source=b"abcd")
self.assertEqual(x, b"abcd")
x = subclass(newarg=4, source=b"abcd")
self.assertEqual(x, b"abcd")
def test_main():
#test.test_support.run_unittest(BytesTest)
#test.test_support.run_unittest(AssortedBytesTest)
#test.test_support.run_unittest(BytesAsStringTest)
test.test_support.run_unittest(
ByteArrayTest,
ByteArrayAsStringTest,
ByteArraySubclassTest,
BytearrayPEP3137Test)
if __name__ == "__main__":
test_main()
| [
"[email protected]"
] | |
7534ead6011b0a4fd7811f049ab3bb60e196c09e | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnxp.py | ab13e8966380af64923340502c1a559c2db478bd | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 142 | py | ii = [('LeakWTI2.py', 13), ('LeakWTI3.py', 5), ('PettTHE.py', 1), ('KiddJAE.py', 6), ('LeakWTI4.py', 7), ('LeakWTI.py', 4), ('WilkJMC.py', 1)] | [
"[email protected]"
] | |
1c676289043ddc7f0d412e1cc3be5ddf29fa5bbd | 31a9a6cd0c6d06c612705a6d572f97d6a6ec7286 | /render.py | a077a40bd70d27e379908dc0a77abc7d79ca947c | [] | no_license | mikeboers/RenderHeatmap | ce9860f1f6d451f65442a566477b97079282c44d | 684a3fef7dc4c3fb0983eed69a983ceb9ecb473b | refs/heads/master | 2023-06-08T05:52:28.983104 | 2013-07-16T01:49:07 | 2013-07-16T01:49:07 | 11,429,803 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,259 | py | #!/usr/bin/env python
from __future__ import print_function
import ctypes as c
import functools
import os
import re
import sys
from subprocess import call, check_output
import prman
from mako.template import Template
def debug(*args, **kwargs):
kwargs['file'] = sys.stderr
print(*args, **kwargs)
_memo_store = {}
def memo(func):
@functools.wraps(func)
def memoized(*args, **kwargs):
arg_repr = [repr(x) for x in args]
arg_repr.extend('%s=%r' % x for x in sorted(kwargs.iteritems()))
spec = '%s(%s)' % (func.__name__, ', '.join(arg_repr))
try:
return _memo_store[spec]
except KeyError:
ret = _memo_store[spec] = func(*args, **kwargs)
return ret
return memoized
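# For example, after the first find_shader('plastic') call, later calls with
# the same name are answered from _memo_store without re-scanning the
# shader path.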
# Consider grabbing the Rix API to get the shader path. But for now, I'm just
# going to hard-code it...
# lib = c.CDLL(os.path.join(os.environ['RMANTREE'], 'lib/libprman.dylib'))
shader_path = ['shaders', os.path.join(os.environ['RMANTREE'], 'lib', 'shaders')]
@memo
def find_shader(name):
for dir_ in shader_path:
path = os.path.join(dir_, name) + '.slo'
if os.path.exists(path):
return path
@memo
def get_shader_methods(path):
return tuple(check_output(['sloinfo', '--methods', path]).strip().split())
@memo
def get_shader_parameters(path):
params = {}
last = None
for line in check_output(['sloinfo', path]).splitlines():
line = line.strip()
if not line:
continue
m = re.match(r'^"(.+?)" "parameter (\S+) (\S+)"$', line)
if m:
name, storage, type_ = m.groups()
last = name
params[name] = [storage, type_, None]
continue
m = re.match(r'^Default value: (.+?)$', line)
if m:
default = m.group(1)
params[last][2] = default
params = dict((key, tuple(value)) for key, value in params.iteritems())
return params
@memo
def wrap_shader(name):
path = find_shader(name)
if not path:
debug('wrap_shader: Could not find shader %r.' % (name, ))
return
methods = get_shader_methods(path)
params = get_shader_parameters(path)
wrapped_name = 'wrapped_%s' % name
wrapped_path = os.path.join('var', 'shaders', wrapped_name) + '.sl'
with open(wrapped_path, 'w') as fh:
template = Template(filename='wrapper.sl.mako')
fh.write(template.render(name=wrapped_name, params=params, methods=set(methods)))
call(['shader', '-Ilib', '-o', wrapped_path + 'o', wrapped_path])
return wrapped_name
class ShaderWrapper(prman.Rif):
def __init__(self, *args, **kwargs):
super(ShaderWrapper, self).__init__(*args, **kwargs)
self._coshader_count = 0
def Surface(self, name, kw):
wrapped = wrap_shader(name)
if wrapped:
self._coshader_count += 1
count = self._coshader_count
handle = '%s_%d' % (wrapped, count)
self.m_ri.Shader(name, handle, kw)
self.m_ri.Surface(wrapped, {'string wrapped_handle': handle})
ri = prman.Ri()
rif = ShaderWrapper(ri)
prman.RifInit([rif])
ri.Begin(ri.RENDER)
for path in sys.argv[1:]:
ri.ReadArchive(path)
ri.End()
| [
"[email protected]"
] | |
62c027779e60eb717cd1ecc9b3f065887d5c151d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /qNQkYzY8GpiFMmndh_9.py | 1c13b93ebc6d37c3186cdfd2d117379f9db81968 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py |
def join(lst):
output = lst.pop(0)
n = 0
for word in lst:
for i in range(1, len(output) + 1):
substring = output[-i:]
if word.startswith(substring):
output += word[i:]
if n == 0 or i < n:
n = i
break
else:
output += word
return [output, n]
| [
"[email protected]"
] | |
2ebff2ed86eca26adfa89e51abaad405fb03e6bc | 20e3aad102cf91991b5fb32741b1317e72d8c813 | /python/test/test_comm_type.py | 86d12fa87137f9ea6b498c182279b8357c1a4db3 | [
"Apache-2.0"
] | permissive | chathuriw/cylon | 32e04148a88aea49ff57258011557d036d39ebc0 | 4a4720e0233d3e67e67cff7dbc112e4d366d9fac | refs/heads/master | 2022-12-23T01:36:20.609934 | 2020-10-09T20:03:28 | 2020-10-09T20:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | ##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycylon.net.comm_type import CommType
print(CommType.MPI.value, CommType.UCX.value, CommType.TCP.value) | [
"[email protected]"
] | |
ab7cc7d498bcf6e430f5f3ee54ce21077a31f0f6 | e20ed90b9be7a0bcdc1603929d65b2375a224bf6 | /generated-libraries/python/netapp/qos/qos_policy_group_delete_iter_info.py | b9c1249c80dd536e9d9b77e35d7b894771e5a5ac | [
"MIT"
] | permissive | radekg/netapp-ontap-lib-gen | 530ec3248cff5ead37dc2aa47ced300b7585361b | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | refs/heads/master | 2016-09-06T17:41:23.263133 | 2015-01-14T17:40:46 | 2015-01-14T17:40:46 | 29,256,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,209 | py | from netapp.qos.qos_policy_group_info import QosPolicyGroupInfo
from netapp.netapp_object import NetAppObject
class QosPolicyGroupDeleteIterInfo(NetAppObject):
"""
    Information about the deletion operation that was
    attempted/performed against a qos-policy-group object,
    including entries for objects that were not deleted
    due to some error.
    This element will be returned only if the input element
    'return-failure-list' is true.
"""
_qos_policy_group_key = None
@property
def qos_policy_group_key(self):
"""
The keys for the qos-policy-group object to which the
deletion applies.
"""
return self._qos_policy_group_key
@qos_policy_group_key.setter
def qos_policy_group_key(self, val):
if val != None:
self.validate('qos_policy_group_key', val)
self._qos_policy_group_key = val
_error_code = None
@property
def error_code(self):
"""
Error code, if the deletion operation caused an error.
"""
return self._error_code
@error_code.setter
def error_code(self, val):
if val != None:
self.validate('error_code', val)
self._error_code = val
_error_message = None
@property
def error_message(self):
"""
Error description, if the operation caused an error.
"""
return self._error_message
@error_message.setter
def error_message(self, val):
if val != None:
self.validate('error_message', val)
self._error_message = val
@staticmethod
def get_api_name():
return "qos-policy-group-delete-iter-info"
@staticmethod
def get_desired_attrs():
return [
'qos-policy-group-key',
'error-code',
'error-message',
]
def describe_properties(self):
return {
'qos_policy_group_key': { 'class': QosPolicyGroupInfo, 'is_list': False, 'required': 'required' },
'error_code': { 'class': int, 'is_list': False, 'required': 'optional' },
'error_message': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
| [
"[email protected]"
] | |
7996e73b8d40a38bcb034f1713d2b76544270a5a | c97b9ae1bf06757ba61f90905e4d9b9dd6498700 | /venv/Lib/site-packages/skimage/util/setup.py | b9b8d3221b147d91483dc8bf4e7f656014dea973 | [] | no_license | Rahulk1p/image-processor | f7ceee2e3f66d10b2889b937cdfd66a118df8b5d | 385f172f7444bdbf361901108552a54979318a2d | refs/heads/main | 2023-03-27T10:09:46.080935 | 2021-03-16T13:04:02 | 2021-03-16T13:04:02 | 348,115,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4915141ae116202354212e66d14926ed94771f1e4478f2a19210d8f9d010464e
size 1206
| [
"[email protected]"
] | |
382e99a6a5e95ffe23cf57a4cfd4f4c83d66325f | a52f9758ace9a85adfb735609e0cb8839aab2977 | /tests/sitetester | b1cfbddc4e01b1b26c0a31ef2a248c188e16be64 | [] | no_license | yadudoc/Swift | 8ec085c4297861c2197d504571e10dce2df961a4 | e96600ae9ce74f529a436d33d984534ca9566ee7 | refs/heads/master | 2021-01-22T09:09:37.081267 | 2011-07-25T08:47:03 | 2011-07-25T08:47:03 | 1,968,948 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | #!/usr/bin/env python
import commands
import os
# the workdir for a given site needs to exist on the site you're testing
# and should contain run-suite & suite.sh
# this script will ssh onto the site, pass run-suite the necessary variables
# for running suite.sh
class Site:
def __init__(self,login,logtype,workdir,queue,project):
self.login = login
self.logintype = logtype
self.workdir = workdir
self.queue = queue
self.project = project
def testSite(self,cogv,swiftv,test_type):
cmdline = self.logintype+" "+self.login+" "+self.workdir+"/run-suite.sh "+self.workdir+"/"+test_type+" "+self.workdir+" "+self.queue+" "+self.project+" "+cogv+" "+swiftv
print "running......"+cmdline
res = commands.getoutput(cmdline)
print res
cmdline2 = "scp -r "+s+":"+rundir+" ."
res2 = commands.getoutput(cmdline)
#---------------------main--------------------------------------------------------
sites = []
s_logins = ['[email protected]','login-abe.ncsa.teragrid.org','login1-qb.loni-lsu.teragrid.org']
logintype = ['ssh','gsissh','gsissh']
workdirs = ['/home/skenny/swift_runs/tests','/u/ac/skenny/swift_runs/tests','/home/skenny/swift_runs/tests']
queues = ['short','debug','workq']
projects = ['CI-IBN000039','TG-DBS080004N','TG-DBS080004N']
test_type = "groups/local-pbs-coasters.sh"
cogv = 'branches/4.1.8'
swiftv = 'branches/release-0.92'
rundir = commands.getoutput('date +\"%Y-%m-%d\"')
rundir = "run-"+rundir
for idx,s in enumerate(s_logins):
sites.append(Site(s,logintype[idx],workdirs[idx],queues[idx],projects[idx]))
# loop over all or run single
sites[1].testSite(cogv,swiftv,test_type)
sites[2].testSite(cogv,swiftv,test_type)
| [
"[email protected]"
] | ||
681c3498de14ed65faac6556b9a4a4fdd30bb14a | 23b9600c8a5afb6451902c3c9b3fd3a6ba9ed1e3 | /pycontour/cnt/tests/test_cnt_property.py | 671b185d4386656c8b6478b5f6fbb0f5be19fe84 | [
"Apache-2.0"
] | permissive | codingPingjun/pycontour | a822cef6f40f80b978b6e24db660d46c4e5e3660 | 892f42dd8569bcffe50433c32ca3bb414163a293 | refs/heads/master | 2020-03-29T10:37:48.033915 | 2018-09-21T18:17:24 | 2018-09-21T18:17:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | # -*- coding: utf-8 -*-
import os, sys
from pycontour import point_list_to_np_arr
from pycontour import np_arr_to_cv_cnt
from pycontour.cnt import get_cnt_area
from pycontour.cnt import get_cnt_aspect_ratio
from pycontour.cnt import get_cnt_solidity
def test_property(cnt):
cnt_area = get_cnt_area(cnt)
cnt_aspect_ratio = get_cnt_aspect_ratio(cnt)
cnt_solidty = get_cnt_solidity(cnt)
print("Contour area is {}".format(cnt_area))
print("Contour aspect ratio is {}".format(cnt_aspect_ratio))
print("Contour solidity is {}".format(cnt_solidty))
if __name__ == "__main__":
point_list1 = [(0, 1), (1, 2), (2, 1), (1, 0)]
np_arr1 = point_list_to_np_arr(point_list1)
cnt1 = np_arr_to_cv_cnt(np_arr1)
test_property(cnt1)
point_list2 = [(0, 0), (1, 2), (2, 0), (1, 0), (1, -1)]
np_arr2 = point_list_to_np_arr(point_list2)
cnt2 = np_arr_to_cv_cnt(np_arr2)
test_property(cnt2)
| [
"[email protected]"
] | |
112b1c682d5f843710c2bb57bd848e533ac37db9 | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/test/test_com_adobe_cq_social_commons_comments_scheduler_impl_search_scheduled_pos_properties.py | 1694f07b9f29ee9d009be5fff57d48e5001c1ead | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 1,487 | py | # coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_adobe_cq_social_commons_comments_scheduler_impl_search_scheduled_pos_properties import ComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties(unittest.TestCase):
"""ComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties(self):
"""Test ComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties"""
# FIXME: construct object with mandatory attributes with example values
# model = swaggeraemosgi.models.com_adobe_cq_social_commons_comments_scheduler_impl_search_scheduled_pos_properties.ComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2391b4f2d82d25411949f36512c5b7942e8622d5 | c9094a4ed256260bc026514a00f93f0b09a5d60c | /homeassistant/util/process.py | 6f8bafda7a70aed91d6d24f7703edddc5045ef30 | [
"Apache-2.0"
] | permissive | turbokongen/home-assistant | 824bc4704906ec0057f3ebd6d92788e096431f56 | 4ab0151fb1cbefb31def23ba850e197da0a5027f | refs/heads/dev | 2023-03-12T05:49:44.508713 | 2021-02-17T14:06:16 | 2021-02-17T14:06:16 | 50,231,140 | 4 | 1 | Apache-2.0 | 2023-02-22T06:14:30 | 2016-01-23T08:55:09 | Python | UTF-8 | Python | false | false | 442 | py | """Util to handle processes."""
from __future__ import annotations
import subprocess
from typing import Any
# mypy: disallow-any-generics
def kill_subprocess(
# pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/4034
process: subprocess.Popen[Any],
) -> None:
"""Force kill a subprocess and wait for it to exit."""
process.kill()
process.communicate()
process.wait()
del process
| [
"[email protected]"
] | |
2198b029b2ef4637d0854116425b74aa541a43f0 | 7eedebb3a27d7507d773fae3bc8c9d35caaa7ece | /bruce.py | d834cd2961ee0ff93686ab06318aec168f23804c | [] | no_license | 40323248/40323248 | 87c24f94cc94c140f41cd83bde9278c18f881ce0 | 60296e94a7755307be4cd34c625bce240c87e907 | refs/heads/master | 2020-12-02T16:39:06.843510 | 2014-11-10T12:49:46 | 2014-11-10T12:49:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | print("hello bruce") | [
"[email protected]"
] | |
93dbb80bdcbe510bed8809ddb9090334d1b7cba5 | edbe6966098d925e831b4e3054c76e4ae1c1891a | /cluster/code/test/test_requester.py | 4ad4d87f316d1d3324176bd449314c690175eacf | [
"CC-BY-3.0",
"MIT",
"BSD-3-Clause"
] | permissive | Sean10/Algorithm_code | 242fcb21de97186ed1caea30ab967c3f4b4e9351 | 8ba923150102e16a9072b8f32ced45d15b18223b | refs/heads/master | 2023-06-22T17:47:07.241192 | 2023-06-19T15:22:23 | 2023-06-19T15:22:23 | 107,443,471 | 0 | 0 | BSD-3-Clause | 2021-06-08T20:35:47 | 2017-10-18T17:51:56 | C++ | UTF-8 | Python | false | false | 1,272 | py | from cluster import *
from . import utils
import mock
CLIENT_ID = 999999
class Tests(utils.ComponentTestCase):
def setUp(self):
super(Tests, self).setUp()
self.callback = mock.Mock(name='callback')
with mock.patch.object(Requester, 'client_ids') as client_ids:
client_ids.next.return_value = CLIENT_ID
self.req = Requester(self.node, 10, self.callback)
self.assertEqual(self.req.client_id, CLIENT_ID)
def test_function(self):
"""Requester should repeatedly send INVOKE until receiving a matching INVOKED"""
self.req.start()
self.assertMessage(['F999'], Invoke(caller='F999', client_id=CLIENT_ID, input_value=10))
self.network.tick(INVOKE_RETRANSMIT)
self.assertMessage(['F999'], Invoke(caller='F999', client_id=CLIENT_ID, input_value=10))
# non-matching
self.node.fake_message(Invoked(client_id=333, output=22))
self.network.tick(INVOKE_RETRANSMIT)
self.assertMessage(['F999'], Invoke(caller='F999', client_id=CLIENT_ID, input_value=10))
self.failIf(self.callback.called)
self.node.fake_message(Invoked(client_id=CLIENT_ID, output=20))
self.callback.assert_called_with(20)
self.assertUnregistered()
| [
"[email protected]"
] | |
7dabd080543a7db593ce58222f1664ce3b14a7f9 | e9c4239c8064d882691314fd5b37208f10447173 | /leetcode/252meetingRoom.py | 4cfdabad9de26d758a4eb4eebb2264529d6c4179 | [] | no_license | IronE-G-G/algorithm | 6f030dae6865b2f4ff4f6987b9aee06874a386c1 | 6f6d7928207534bc8fb6107fbb0d6866fb3a6e4a | refs/heads/master | 2020-09-21T03:02:20.908940 | 2020-03-22T15:19:41 | 2020-03-22T15:19:41 | 224,658,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | """
252. Meeting Rooms
Given an array of meeting time intervals where each interval includes a
start and an end time [[s1,e1],[s2,e2],...] (si < ei), determine whether
a person could attend all of the meetings.
Example 1:
Input: [[0,30],[5,10],[15,20]]
Output: false
Example 2:
Input: [[7,10],[2,4]]
Output: true
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/meeting-rooms
Copyright belongs to LeetCode (领扣网络). Commercial reprints require
official authorization; non-commercial reprints must credit the source.
"""
from typing import List
class Solution:
def canAttendMeetings(self, intervals: List[List[int]]) -> bool:
if not intervals:
return True
intervals.sort()
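        # after sorting by start time, all meetings are attendable iff no
        # meeting starts before the previous one ends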
last_end = intervals[0][1]
for item in intervals[1:]:
if item[0]<last_end:
return False
last_end = item[1]
return True | [
"[email protected]"
] | |
51ef5cd2d9cfbed4111cec84b099160d93c11f59 | 9d418674a6cb6797656b15926f1f259964dabd71 | /jokedbapp/image_profiles/basic_vertical.py | d702ca3843eaed294094d238a45faea1dfc4b388 | [
"MIT"
] | permissive | BL-Labs/jokedbapp | f54a7aedded95591e1719ef19d6ae1f72bb6d73f | a0e03eefbf04255623a9ad81db145f1508fade5f | refs/heads/master | 2020-06-08T09:19:45.827254 | 2019-04-10T16:04:14 | 2019-04-10T16:04:14 | 23,437,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,369 | py | import cairo
DEFAULT = {'font': 'American Typewriter', 'size':32.0, 'style': cairo.FONT_SLANT_NORMAL, 'weight': cairo.FONT_WEIGHT_NORMAL, 'LINEHEIGHT':1.1}
DEFAULT_SMALL = {'font': 'American Typewriter', 'size':22.0, 'style': cairo.FONT_SLANT_NORMAL, 'weight': cairo.FONT_WEIGHT_NORMAL, 'LINEHEIGHT':1.1}
BOLD = {'font': 'American Typewriter', 'size':32.0, 'style': cairo.FONT_SLANT_NORMAL, 'weight': cairo.FONT_WEIGHT_BOLD, 'LINEHEIGHT':1}
BOLD_SMALL = {'font': 'American Typewriter', 'size':22.0, 'style': cairo.FONT_SLANT_NORMAL, 'weight': cairo.FONT_WEIGHT_NORMAL, 'LINEHEIGHT':1}
EMPHASIS = {'font': 'American Typewriter', 'size':32.0, 'style': cairo.FONT_SLANT_OBLIQUE, 'weight': cairo.FONT_WEIGHT_NORMAL, 'LINEHEIGHT':1}
TITLE = {'font': 'American Typewriter', 'size':32.0, 'style': cairo.FONT_SLANT_NORMAL, 'weight': cairo.FONT_WEIGHT_BOLD, 'LINEHEIGHT':1.3}
LAYOUT = {'_SIZE': (2,2), 'DEFAULT': (0,0), 'IMAGE': (1, 0), 'ATTRIB': (0,1)}
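# LAYOUT appears to map each part name to a (column, row) cell of a grid
# whose dimensions are given by '_SIZE'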
BACKGROUND_COLOUR = (0.2, 0.2, 0.2, 1.0) #RGBA
TEXTS = {'DEFAULT': DEFAULT,
'BOLD': BOLD,
'EMPHASIS': EMPHASIS,
'TITLE': TITLE,
'DEFAULT_SMALL': DEFAULT_SMALL,
'BOLD_SMALL': BOLD_SMALL,}
PARTS = {'DEFAULT': {'BACKGROUND': ((171.0/256.0), (140.0/256.0), (16.0/256.0), 1.0), 'FOREGROUND': (0.0, 0.0, 0.0, 1.0),
'TEXT': TEXTS, 'TEXTWIDTH':46, "MARGIN":(30,0)},
'IMAGE': {'BACKGROUND': ((171.0/256.0), (140.0/256.0), (16.0/256.0), 1.0), 'FOREGROUND': (0.0, 0.0, 0.0, 1.0),
'TEXT': TEXTS, 'TEXTWIDTH':None, "MARGIN":(0,0),
'IMAGES': ["/home/ben/jokedb/jokedbapp/imgs/01.png",
"/home/ben/jokedb/jokedbapp/imgs/02.png",
"/home/ben/jokedb/jokedbapp/imgs/03.png",
"/home/ben/jokedb/jokedbapp/imgs/04.png",
"/home/ben/jokedb/jokedbapp/imgs/05.png",
"/home/ben/jokedb/jokedbapp/imgs/06.png",
"/home/ben/jokedb/jokedbapp/imgs/07.png",
"/home/ben/jokedb/jokedbapp/imgs/08.png",
"/home/ben/jokedb/jokedbapp/imgs/09.png",
"/home/ben/jokedb/jokedbapp/imgs/10.png",
"/home/ben/jokedb/jokedbapp/imgs/11.png",
"/home/ben/jokedb/jokedbapp/imgs/12.png",
"/home/ben/jokedb/jokedbapp/imgs/13.png",
"/home/ben/jokedb/jokedbapp/imgs/14.png",
"/home/ben/jokedb/jokedbapp/imgs/15.png",
"/home/ben/jokedb/jokedbapp/imgs/16.png",
"/home/ben/jokedb/jokedbapp/imgs/17.png",
"/home/ben/jokedb/jokedbapp/imgs/18.png",
"/home/ben/jokedb/jokedbapp/imgs/19.png",
"/home/ben/jokedb/jokedbapp/imgs/20.png",
"/home/ben/jokedb/jokedbapp/imgs/21.png",
"/home/ben/jokedb/jokedbapp/imgs/22.png",
"/home/ben/jokedb/jokedbapp/imgs/23.png",
"/home/ben/jokedb/jokedbapp/imgs/24.png",
"/home/ben/jokedb/jokedbapp/imgs/25.png",
"/home/ben/jokedb/jokedbapp/imgs/26.png",
"/home/ben/jokedb/jokedbapp/imgs/27.png",
"/home/ben/jokedb/jokedbapp/imgs/28.png",
"/home/ben/jokedb/jokedbapp/imgs/29.png",
"/home/ben/jokedb/jokedbapp/imgs/30.png",
"/home/ben/jokedb/jokedbapp/imgs/31.png",
"/home/ben/jokedb/jokedbapp/imgs/32.png",
"/home/ben/jokedb/jokedbapp/imgs/33.png",
"/home/ben/jokedb/jokedbapp/imgs/34.png",
"/home/ben/jokedb/jokedbapp/imgs/35.png",
"/home/ben/jokedb/jokedbapp/imgs/36.png",
"/home/ben/jokedb/jokedbapp/imgs/37.png",
"/home/ben/jokedb/jokedbapp/imgs/38.png",
"/home/ben/jokedb/jokedbapp/imgs/39.png",
"/home/ben/jokedb/jokedbapp/imgs/40.png",
"/home/ben/jokedb/jokedbapp/imgs/41.png",
"/home/ben/jokedb/jokedbapp/imgs/42.png",
"/home/ben/jokedb/jokedbapp/imgs/43.png",
"/home/ben/jokedb/jokedbapp/imgs/44.png",
"/home/ben/jokedb/jokedbapp/imgs/45.png",
"/home/ben/jokedb/jokedbapp/imgs/46.png",
"/home/ben/jokedb/jokedbapp/imgs/47.png",
"/home/ben/jokedb/jokedbapp/imgs/48.png",
"/home/ben/jokedb/jokedbapp/imgs/49.png",
"/home/ben/jokedb/jokedbapp/imgs/50.png"]
},
'ATTRIB': {'BACKGROUND': ((158.0/256.0), (11.0/256.0), (15.0/256.0), 1.0), 'FOREGROUND': (1.0, 1.0, 1.0, 1.0),
'TEXT': TEXTS, 'TEXTWIDTH':40, "MARGIN":(50,0)},}
| [
"[email protected]"
] | |
68b2a186aa88e4263e2be029b45f32089b87e6ec | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03855/s164056215.py | bdc63f1ec0e4debd072fc610599656e89ad76b4e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,705 | py | import sys
from collections import deque
from collections import defaultdict
from collections import Counter
def conn(n,m,e):
d=dict(zip(range(1,n+1),range(-1,(-1)*n-1,-1)))
td=defaultdict(lambda:deque([])) #tdは同値類がキーで中の元が値
c=1
for edge in e:
a=edge[0]
b=edge[1]
da=d[a] #da,dbはa,bの含まれる同値流のラベル
db=d[b]
if da<0 and db<0:
d[a]=c
d[b]=c
td[c].append(a)
td[c].append(b)
c+=1
elif da>0 and db<0:
d[b]=da
td[d[a]].append(b)
elif da<0 and db>0:
d[a]=db
td[d[b]].append(a)
elif da>0 and db>0 and da!=db:
for x in td[db]:
d[x]=da
td[da].append(x)
return list(d.values())
def components(n,k,e):
ed=defaultdict(lambda:deque())
for edge in e:
ed[edge[0]].append(edge[1])
c=0
s=[0]*n
label=[0]*n
for i in range(1,n+1):
if s[i-1]==0:
c+=1
label[c-1]=c
stack=deque([i])
while stack:
w=stack.pop()
s[w-1]=c
while ed[w]:
wn=ed[w].pop()
if s[wn-1]==0:
s[wn-1]=c
if ed[wn]:
stack.append(w)
w=wn
elif s[wn-1]<c:
label[s[wn-1]-1]=c
return [label[s[i]-1] for i in range(n)]
def components2(n,k,e):
ed=defaultdict(lambda:deque())
for edge in e:
ed[edge[0]].append(edge[1])
ed[edge[1]].append(edge[0])
c=0
s=[0]*n
for i in range(1,n+1):
if s[i-1]==0:
c+=1
stack=deque([i])
while stack:
w=stack.pop()
s[w-1]=c
while ed[w]:
wn=ed[w].pop()
if s[wn-1]==0:
s[wn-1]=c
if ed[wn]:
stack.append(w)
w=wn
return [s[i] for i in range(n)]
def main(n,k,l,e1,e2):
d1=components2(n,k,e1)
d2=components2(n,l,e2)
p=tuple(zip(iter(d1),iter(d2)))
d=Counter(p)
# print(d1,d2,d,p)
print(' '.join([str(d[x]) for x in p]))
if __name__=='__main__':
ssr=sys.stdin.readline
n,k,l=map(int,ssr().strip().split())
e1=[]
e2=[]
for _ in range(k):
e1.append(tuple(map(int,ssr().strip().split())))
for _ in range(l):
e2.append(tuple(map(int,ssr().strip().split())))
main(n,k,l,e1,e2)
| [
"[email protected]"
] | |
19516fddcf46da7c958112de4b4a48b588f34952 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02262/s633587482.py | d753ca2ac61eed71618d108cbb94d29de6373baa | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | from sys import stdin
def insertionSort(A, n, g):
global cnt
for i in range(g, n):
v = A[i]
j = i - g
while j >= 0 and A[j] > v:
A[j+g] = A[j]
j = j - g
cnt += 1
A[j+g] = v
def shellSort(A, n):
global cnt
cnt = 0
g = 1
G = [g]
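    # gap sequence 1, 4, 13, 40, ... (g = 3*g + 1); reversed below so the
    # sort starts with the largest gap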
while 3 * g + 1 < n:
g = 3 * g + 1
G.append(g)
m = len(G)
G.reverse()
print(m)
print(' '.join(map(str, G)))
for i in range(m):
insertionSort(A, n, G[i])
n = int(stdin.readline())
A = [int(stdin.readline()) for i in range(n)]
shellSort(A, n)
print(cnt)
for a in A:
print(a)
| [
"[email protected]"
] | |
17783be31b334cd82e6068aab17d8f98bec6cca8 | de9b8b7192a0a81e9249823bb2b86f0b7e452863 | /.history/main_20171106171523.py | deb4ddd52f38171dcbe23d61cbf9d45aa121589b | [
"MIT"
] | permissive | reecebenson/uwe-dadsa-tennis-a | f5eaeb1b96d4e61f29279514e68eeea8ad6533db | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | refs/heads/master | 2023-07-08T16:13:23.963348 | 2017-11-30T12:07:01 | 2017-11-30T12:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # DADSA - Assignment 1
# Reece Benson
from classes import Handler as Handler
from classes import Player as Player
from classes import Season as Season
from classes import Tournament as Tournament
from classes import Round as Round
from classes import Match as Match
class App():
def __hold__(self):
input(">>> Press <Return> to terminate the program")
exit()
def __main__(self):
handler = Handler.Handler()
# Hold the program
self.__hold__()
App().__main__() | [
"[email protected]"
] | |
bfaf7ad3a1c88c89c66f90f56a241c967f7662e2 | 4f0e26b19f9b97c2a62605c039440fa984ebaaba | /scripts/easy_install-2.6-script.py | 2c20b3f8a0a56b276e911c4435a3fbfc81403321 | [] | no_license | acmiyaguchi/buildbotve | 61ff08955997445a2b38032692d1ba0fcc9235e0 | 8f2806e1b83ff1df5f6f6313089c0d1d1f2fe288 | refs/heads/master | 2020-12-25T19:04:13.485076 | 2015-08-10T21:48:24 | 2015-08-10T21:48:24 | 40,506,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | #!c:\mozilla-build\buildbotve\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'distribute==0.6.14','console_scripts','easy_install-2.6'
__requires__ = 'distribute==0.6.14'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('distribute==0.6.14', 'console_scripts', 'easy_install-2.6')()
)
| [
"[email protected]"
] | |
6306027d406174be1fb961bc6ff1ffcd7c4b825f | b4afb44b8f483c048716fe12d778186ce68ac846 | /pages/ios/ffan/fei_fan_activity_page_configs.py | d12c7b41288668dca21a167d40b4743ce3b02f25 | [] | no_license | liu111xiao111/UItest | 64309b2c85f6d2334d64bb0875ba9ced459ebb1e | 67e2acc9a99da81022e286e8d8ec7ccb12636ff3 | refs/heads/master | 2021-09-01T18:30:28.044296 | 2017-12-28T04:36:46 | 2017-12-28T04:36:46 | 115,585,226 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
class FeiFanActivityPageConfigs(object):
'''
This is a configuration class for FeiFanActivityPage class.
'''
# Assert view time out
assert_view_timeout = 10
# Assert invalid view time out
assert_invalid_view_time = 3
# Click button time out
click_on_button_timeout = 10
# Fei fan activity title
name_fei_fan_activity_title_st = u"飞凡活动"
def __init__(self):
pass
| [
"[email protected]"
] | |
a622006114c11724974981c8fde6b7a6250f0085 | 57e6f45405452526945c34c43d42c8f8fdbf1de4 | /changeseeking_tracing/run_m6.py | fd887a5c4f2374d0d3d53b7e2b0b5f5750026482 | [] | no_license | mitroadmaps/map-maintainer | d48c4d58d4129672afb615674eb4718ca265a870 | b9e125830ed177f182bbd87d18e8b76946408e7b | refs/heads/master | 2023-08-29T13:00:09.442187 | 2021-11-02T23:52:43 | 2021-11-02T23:52:43 | 335,781,636 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,214 | py | from discoverlib import geom, graph
import model_m6a as model
import tileloader as tileloader
import numpy
import math
import os
import os.path
import random
import scipy.ndimage
import sys
import tensorflow as tf
import time
model_path = sys.argv[1]
old_tile_path = sys.argv[2]
new_tile_path = sys.argv[3]
graph_path = sys.argv[4]
angle_path = sys.argv[5]
MODEL_BASE = model_path
tileloader.REGIONS = ['mass']
tileloader.TRAINING_REGIONS = tileloader.REGIONS
tileloader.tile_dir = [
old_tile_path,
new_tile_path,
]
tileloader.graph_dir = graph_path
tileloader.angles_dir = angle_path
WINDOW_SIZE = 256
NUM_TRAIN_TILES = 1024
TILE_SIZE = 4096
RECT_OVERRIDE = None
NUM_BUCKETS = 64
MASK_NEAR_ROADS = False
tileloader.tile_size = 4096
tileloader.window_size = 256
tiles = tileloader.Tiles(2, 20, NUM_TRAIN_TILES+8, 'sat')
tiles.prepare_training()
train_tiles = list(tiles.train_tiles)
random.shuffle(train_tiles)
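# hold out 10% of the shuffled training tiles as a validation set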
num_val = len(train_tiles)//10
val_tiles = train_tiles[0:num_val]
train_tiles = train_tiles[num_val:]
print('pick {} train tiles from {}'.format(len(train_tiles), len(tiles.train_tiles)))
# initialize model and session
print('initializing model')
m = model.Model(input_channels=3, bn=True)
session = tf.Session()
model_path = os.path.join(MODEL_BASE, 'model_latest/model')
best_path = os.path.join(MODEL_BASE, 'model_best/model')
if os.path.isfile(model_path + '.meta'):
print('... loading existing model')
m.saver.restore(session, model_path)
else:
print('... initializing a new model')
session.run(m.init_op)
def get_tile_rect(tile):
if RECT_OVERRIDE:
return RECT_OVERRIDE
p = geom.Point(tile.x, tile.y)
return geom.Rectangle(
p.scale(TILE_SIZE),
p.add(geom.Point(1, 1)).scale(TILE_SIZE)
)
def get_tile_example(tile, tries=10):
rect = get_tile_rect(tile)
# pick origin: must be multiple of the output scale
origin = geom.Point(random.randint(0, rect.lengths().x//4 - WINDOW_SIZE//4), random.randint(0, rect.lengths().y//4 - WINDOW_SIZE//4))
origin = origin.scale(4)
origin = origin.add(rect.start)
tile_origin = origin.sub(rect.start)
big_ims = tiles.cache.get_window(tile.region, rect, geom.Rectangle(tile_origin, tile_origin.add(geom.Point(WINDOW_SIZE, WINDOW_SIZE))))
if len(tileloader.get_tile_keys()) > 1:
inputs = [big_ims[key] for key in tileloader.get_tile_keys()]
#input = numpy.concatenate(inputs, axis=2).astype('float32') / 255.0
input = random.choice(inputs).astype('float32') / 255.0
else:
input = big_ims['input'].astype('float32') / 255.0
target = big_ims['angles'].astype('float32') / 255.0
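    # reject windows whose angle target has fewer than 64 labeled pixels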
if numpy.count_nonzero(target.max(axis=2)) < 64 and tries > 0:
#return get_tile_example(tile, tries - 1)
return None
example = {
'region': tile.region,
'origin': origin,
'input': input,
'target': target,
}
if MASK_NEAR_ROADS:
mask = target.max(axis=2) > 0
mask = scipy.ndimage.morphology.binary_dilation(mask, iterations=9)
example['mask'] = mask
return example
def get_example(traintest='train'):
while True:
if traintest == 'train':
tile = random.choice(train_tiles)
elif traintest == 'test':
tile = random.choice(val_tiles)
example = get_tile_example(tile)
if example is not None:
return example
val_examples = [get_example('test') for _ in range(2048)]
def vis_example(example, outputs=None):
x = numpy.zeros((WINDOW_SIZE, WINDOW_SIZE, 3), dtype='uint8')
x[:, :, :] = example['input'] * 255
x[WINDOW_SIZE//2-2:WINDOW_SIZE//2+2, WINDOW_SIZE//2-2:WINDOW_SIZE//2+2, :] = 255
gc = tiles.get_gc(example['region'])
rect = geom.Rectangle(example['origin'], example['origin'].add(geom.Point(WINDOW_SIZE, WINDOW_SIZE)))
for edge in gc.edge_index.search(rect):
start = edge.src.point
end = edge.dst.point
for p in geom.draw_line(start.sub(example['origin']), end.sub(example['origin']), geom.Point(WINDOW_SIZE, WINDOW_SIZE)):
x[p.x, p.y, 0:2] = 0
x[p.x, p.y, 2] = 255
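    # draw two angle rings around the window center: the outer ring
    # (radius 100-120) shows the target buckets, the inner ring
    # (radius 70-90) shows the model outputs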
for i in range(WINDOW_SIZE):
for j in range(WINDOW_SIZE):
di = i - WINDOW_SIZE//2
dj = j - WINDOW_SIZE//2
d = math.sqrt(di * di + dj * dj)
a = int((math.atan2(dj, di) - math.atan2(0, 1) + math.pi) * NUM_BUCKETS / 2 / math.pi)
if a >= NUM_BUCKETS:
a = NUM_BUCKETS - 1
elif a < 0:
a = 0
elif d > 100 and d <= 120 and example['target'] is not None:
x[i, j, 0] = example['target'][WINDOW_SIZE//8, WINDOW_SIZE//8, a] * 255
x[i, j, 1] = example['target'][WINDOW_SIZE//8, WINDOW_SIZE//8, a] * 255
x[i, j, 2] = 0
elif d > 70 and d <= 90 and outputs is not None:
x[i, j, 0] = outputs[WINDOW_SIZE//8, WINDOW_SIZE//8, a] * 255
x[i, j, 1] = outputs[WINDOW_SIZE//8, WINDOW_SIZE//8, a] * 255
x[i, j, 2] = 0
return x
def get_learning_rate(epoch):
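    # simple step decay: 1e-4 for the first 100 epochs, then 1e-5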
if epoch < 100:
return 1e-4
else:
return 1e-5
best_loss = None
for epoch in range(200):
start_time = time.time()
train_losses = []
for _ in range(1024):
examples = [get_example('train') for _ in range(model.BATCH_SIZE)]
feed_dict = {
m.is_training: True,
m.inputs: [example['input'] for example in examples],
m.targets: [example['target'] for example in examples],
m.learning_rate: get_learning_rate(epoch),
}
if MASK_NEAR_ROADS:
feed_dict[m.mask] = [example['mask'] for example in examples]
_, loss = session.run([m.optimizer, m.loss], feed_dict=feed_dict)
train_losses.append(loss)
train_loss = numpy.mean(train_losses)
train_time = time.time()
val_losses = []
for i in range(0, len(val_examples), model.BATCH_SIZE):
examples = val_examples[i:i+model.BATCH_SIZE]
feed_dict = {
m.is_training: False,
m.inputs: [example['input'] for example in examples],
m.targets: [example['target'] for example in examples],
}
if MASK_NEAR_ROADS:
feed_dict[m.mask] = [example['mask'] for example in examples]
		# Fetch the scalar loss directly so val_losses holds plain floats.
		loss = session.run(m.loss, feed_dict=feed_dict)
		val_losses.append(loss)
val_loss = numpy.mean(val_losses)
val_time = time.time()
print('iteration {}: train_time={}, val_time={}, train_loss={}, val_loss={}/{}'.format(epoch, int(train_time - start_time), int(val_time - train_time), train_loss, val_loss, best_loss))
m.saver.save(session, model_path)
if best_loss is None or val_loss < best_loss:
best_loss = val_loss
m.saver.save(session, best_path)
| [
"[email protected]"
] | |
9687d37ecb9adfe8cd3cd82ced27afb31f1bafd2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2653/60749/257218.py | 8275f8a1cd5eee9113e42a0a5bae23d48fe94bb8 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | n=int(input())
res=[]
for _ in range(n):
res.append(input().split(" "))
for h in res:
a=h[0]
b=h[1]
res=str((a-1)*(10-b))
print(res) | [
"[email protected]"
] | |
5eb4a0a88031d87a676379e40039387261a2a1cf | d967cf34a147f1bde1839fecfa1d356bb4c83d66 | /scripts/releaser_hooks.py | 5c115013ca4c09b6a6f7c21760ccdee4dbc72608 | [
"BSD-3-Clause"
] | permissive | Lessica/django-photologue | dbb44656b85c06f5c733ca50efb3595599b9387e | 3e2e7b3cf02ba396ccb063927513930a9a711036 | refs/heads/master | 2022-04-18T01:52:04.228366 | 2022-02-23T10:01:06 | 2022-02-23T10:01:06 | 256,437,446 | 0 | 0 | BSD-3-Clause | 2020-04-17T07:49:46 | 2020-04-17T07:49:45 | null | UTF-8 | Python | false | false | 3,436 | py | import os
import subprocess
try:
import polib
except ImportError:
print('Msg to the package releaser: prerelease hooks will not work as you have not installed polib.')
raise
import codecs
import copy
def prereleaser_before(data):
"""
1. Run the unit tests one last time before we make a release.
2. Update the CONTRIBUTORS.txt file.
    Note: requires * polib (https://pypi.python.org/pypi/polib).
                   * flake8.
                   * isort.
"""
print('Running unit tests.')
subprocess.check_output(["python", "example_project/manage.py", "test", "photologue"])
print('Running flake8 check.')
# See setup.cfg for configuration options.
subprocess.check_output(["flake8"])
print('Running isort check.')
subprocess.check_output(["isort", ".", "--check", "--quiet"])
print('Checking that we have no outstanding DB migrations.')
output = subprocess.check_output(["python", "example_project/manage.py", "makemigrations", "--dry-run",
"photologue"])
if not output == b"No changes detected in app 'photologue'\n":
raise Exception('There are outstanding migrations for Photologue.')
print('Updating CONTRIBUTORS.txt')
# This command will get the author of every commit.
output = subprocess.check_output(["git", "log", "--format='%aN'"])
# Convert to a list.
contributors_list = [contributor.strip("'") for contributor in output.decode('utf-8').split('\n')]
# Now add info from the translator files. This is incomplete, we can only list
# the 'last contributor' to each translation.
for language in os.listdir('photologue/locale/'):
filename = 'photologue/locale/{}/LC_MESSAGES/django.po'.format(language)
po = polib.pofile(filename)
last_translator = po.metadata['Last-Translator']
contributors_list.append(last_translator[:last_translator.find('<') - 1])
# Now we want to only show each contributor once, and to list them by how many
# contributions they have made - a rough guide to the effort they have put in.
contributors_dict = {}
for author in contributors_list:
author_copy = copy.copy(author)
if author_copy in ('', '(no author)', 'FULL NAME'):
# Skip bad data.
continue
# The creator of this project should always appear first in the list - so
# don't add him to this list, but hard-code his name.
if author_copy in ('Justin Driscoll', 'justin.driscoll'):
continue
# Handle contributors who appear under multiple names.
if author_copy == 'richardbarran':
author_copy = 'Richard Barran'
if author_copy in contributors_dict:
contributors_dict[author_copy] += 1
else:
contributors_dict[author_copy] = 1
with codecs.open('CONTRIBUTORS.txt', 'w', encoding='utf8') as f:
f.write('Photologue is made possible by all the people who have contributed'
' to it. A non-exhaustive list follows:\n\n')
f.write('Justin Driscoll\n')
for i in sorted(contributors_dict, key=contributors_dict.get, reverse=True):
f.write(i + '\n')
# And commit the new contributors file.
if subprocess.check_output(["git", "diff", "CONTRIBUTORS.txt"]):
subprocess.check_output(["git", "commit", "-m", "Updated the list of contributors.", "CONTRIBUTORS.txt"])
| [
"[email protected]"
] | |
03665678c340fd12dded68cb93404683636a2552 | 63ce91bae5eeadf885262b8fe0e769a64454d257 | /ignite_template.py | 860100b069677d6072b88c13df487527f4e5f296 | [
"Apache-2.0"
] | permissive | Data-drone/cv_experiments | c7349e7808f7f9c1315ce1efe33be1f86f4a9f80 | d6e1d9716c03a9165e3d8a08f4cc1287323a56ca | refs/heads/master | 2021-06-26T04:33:10.079771 | 2021-01-19T11:40:30 | 2021-01-19T11:40:30 | 196,596,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,638 | py | ### Ignite Example
from argparse import ArgumentParser
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
import torch
import torch.nn.functional as F
from torchvision.transforms import Compose, ToTensor, Normalize
from torchvision.datasets import MNIST
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from tqdm import tqdm
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
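    # (0.1307,) and (0.3081,) below are the conventional global mean/std of
    # MNIST pixel intensities, used to normalize the inputs.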
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True),
batch_size=train_batch_size, shuffle=True)
val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False),
batch_size=val_batch_size, shuffle=False)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = 'cpu'
if torch.cuda.is_available():
device = 'cuda'
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
evaluator = create_supervised_evaluator(model,
metrics={'accuracy': Accuracy(),
'nll': Loss(F.nll_loss)},
device=device)
desc = "ITERATION - loss: {:.2f}"
pbar = tqdm(
initial=0, leave=False, total=len(train_loader),
desc=desc.format(0)
)
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
        # 'iteration' rather than 'iter', which would shadow the builtin.
        iteration = (engine.state.iteration - 1) % len(train_loader) + 1
        if iteration % log_interval == 0:
pbar.desc = desc.format(engine.state.output)
pbar.update(log_interval)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
pbar.refresh()
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics['accuracy']
avg_nll = metrics['nll']
tqdm.write(
"Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
.format(engine.state.epoch, avg_accuracy, avg_nll)
)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics['accuracy']
avg_nll = metrics['nll']
tqdm.write(
"Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
.format(engine.state.epoch, avg_accuracy, avg_nll))
pbar.n = pbar.last_print_n = 0
trainer.run(train_loader, max_epochs=epochs)
pbar.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64,
help='input batch size for training (default: 64)')
parser.add_argument('--val_batch_size', type=int, default=1000,
help='input batch size for validation (default: 1000)')
parser.add_argument('--epochs', type=int, default=10,
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5,
help='SGD momentum (default: 0.5)')
parser.add_argument('--log_interval', type=int, default=10,
help='how many batches to wait before logging training status')
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval) | [
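    # Example invocation (argument values are illustrative):
    #   python ignite_template.py --batch_size 128 --epochs 5 --lr 0.01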
"[email protected]"
] | |
a0a3845ac7ef4d04ce903f51f533772194a7f499 | a0c60bd23fbdc7a89786d1f775455057aeb32701 | /torch/onnx/_constants.py | 8b71a4f86c173d18eec7d8955e92137a7d42a1e7 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | cpuhrsch/pytorch | 474dd74a729c11970af0a010d3f076e8ef31b74f | be327ec08f320e256d444693dde65fe55831bc46 | refs/heads/master | 2023-06-23T04:33:16.514572 | 2022-09-30T18:51:43 | 2022-09-30T18:51:43 | 118,517,346 | 2 | 2 | NOASSERTION | 2022-05-24T00:58:21 | 2018-01-22T21:26:11 | C++ | UTF-8 | Python | false | false | 355 | py | """Constant values used in ONNX."""
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
ONNX_BASE_OPSET = 9
ONNX_MIN_OPSET = 7
ONNX_MAX_OPSET = 17
# ONNX_DEFAULT_OPSET generated by tools/onnx/update_default_opset_version.py
ONNX_DEFAULT_OPSET = 14
ONNX_CONSTANT_FOLDING_MIN_OPSET = 9
PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues"
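# Illustrative use (not part of this module): exporter code elsewhere in
# torch.onnx checks requested opset versions against these bounds, roughly:
#   from torch.onnx import _constants
#   assert _constants.ONNX_MIN_OPSET <= opset <= _constants.ONNX_MAX_OPSET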
| [
"[email protected]"
] | |
9dd608ba30dfbbde63634e389ca85fd9e593e4db | dbbb048a0e494d92ee3851b0e67836ae38b147b5 | /util/cdx2db.py | b2bd77d54d92546ab1d77b31b793c795412c9883 | [
"Unlicense"
] | permissive | ArchiveTeam/justintv-index | 4c5ce07711c8357dbfedaec14ffe0bc3e783ab69 | 3c6397f54a9e50456ad683a944c32db2aeee4153 | refs/heads/master | 2021-01-18T16:22:52.003475 | 2014-09-05T20:41:15 | 2014-09-05T20:41:15 | 22,726,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | '''Process the CDX files into a database.'''
import gzip
import argparse
import dbm
import re
def read_cdx(filename):
with gzip.open(filename, 'rt') as in_file:
header = in_file.readline()
assert header.rstrip() == ' CDX N b a m s k r M S V g'
for line in in_file:
(massaged_url, date, url, mime_type, status_code,
sha1_checksum, redirect, aif_meta_tags, compressed_archive_size,
archive_offset, filename) = line.rstrip().split()
yield (massaged_url, date, url, mime_type, status_code,
sha1_checksum, redirect, aif_meta_tags, compressed_archive_size,
archive_offset, filename)
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('cdx_file', nargs='+')
args = arg_parser.parse_args()
video_2_user_db = dbm.open('video_2_user.dbm', 'c')
video_2_server_db = dbm.open('video_2_server.dbm', 'c')
for cdx_file in args.cdx_file:
print('Opening', cdx_file)
for row in read_cdx(cdx_file):
(massaged_url, date, url, mime_type, status_code,
sha1_checksum, redirect, aif_meta_tags, compressed_archive_size,
archive_offset, filename) = row
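            # URLs of the form justin.tv/<user>/<single letter>/<video id>
            # let us map each video id to the account that owns it.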
match = re.search(r'justin\.tv/([^/]+)/\w/([\d]+)', url)
if match:
user = match.group(1)
video_id = match.group(2)
print(video_id, user)
video_2_user_db[video_id] = user
match = re.search(r'store.+_([\d]+)\.', url)
if match:
video_id = match.group(1)
print(video_id, url)
video_2_server_db[video_id] = url
video_2_user_db.close()
video_2_server_db.close()
if __name__ == '__main__':
main()
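# Illustrative usage (file names are whatever CDX archives you have on hand):
#   python cdx2db.py part-00000.cdx.gz part-00001.cdx.gz
# The resulting dbm files can then be read back, e.g.:
#   import dbm
#   with dbm.open('video_2_user.dbm') as db:
#       print(db[b'12345'])  # owner of video 12345, stored as bytes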
| [
"[email protected]"
] |