blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d1e1ea3ca62fa8c7eee1ea56bcf21143db9db802 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03962/s618919847.py | 7c3073e9045ef040a154ec5d8acb94554d03edf5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import sys
import math
from functools import reduce
import bisect
def getN():
    # Read one line from stdin and return it as a single int.
    return int(input())
def getNM():
    # Read one line of whitespace-separated integers and return them as a map iterator.
    return map(int, input().split())
def getList():
    # Read one line of whitespace-separated integers and return them as a list.
    return list(map(int, input().split()))
def input():
    # Deliberately shadows the builtin input() with a faster sys.stdin reader
    # (common competitive-programming idiom); trailing newline is stripped.
    return sys.stdin.readline().rstrip()
# input = sys.stdin.buffer.readline
def index(a, x):
    # Locate the leftmost position of x in the sorted list a.
    # Returns the index when found, False otherwise.
    # NOTE: a hit at position 0 is falsy, so callers should compare
    # against False explicitly rather than truth-testing the result.
    pos = bisect.bisect_left(a, x)
    found = pos != len(a) and a[pos] == x
    return pos if found else False
#############
# MAIN CODE #
#############
# AtCoder ABC p03962: read three integers and print how many distinct values they contain.
a, b, c = getNM()
print(len({a, b, c}))
| [
"[email protected]"
] | |
92275752bbea081287f13884cac8c5b556fa1fd2 | 5c58587ebfbf56192b3dc6ed6f43bc002c8e2cff | /payments/migrations/0026_auto_20180906_1023.py | bb3127132c77be7ffde946ce16ac96b8870c7008 | [] | no_license | hossamelneily/nexchange | fb9a812cfc72ac00b90cf64d6669a8129c2d2d4b | 6d69274cd3808989abe2f5276feb772d1f0fa8b4 | refs/heads/release | 2022-12-13T09:20:47.297943 | 2019-02-12T08:20:34 | 2019-02-12T08:20:34 | 210,064,740 | 1 | 2 | null | 2022-12-09T00:54:01 | 2019-09-21T23:19:34 | Python | UTF-8 | Python | false | false | 4,388 | py | # Generated by Django 2.0.7 on 2018-09-06 10:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2018-09-06).

    Adds card-BIN lookup models (Bank, BankBin, CardCompany, CardLevel,
    CardType) and links PaymentPreference to a BankBin.
    """

    dependencies = [
        ('core', '0066_remove_transactionprice_type'),
        ('payments', '0025_auto_20180822_1537'),
    ]

    operations = [
        # Issuing bank, optionally linked to a core.Country.
        migrations.CreateModel(
            name='Bank',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
                ('website', models.URLField(blank=True, null=True)),
                ('phone', models.CharField(blank=True, max_length=50, null=True)),
                ('country', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='core.Country')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Card BIN (first digits of a card number); unique per bank/card combo.
        migrations.CreateModel(
            name='BankBin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('bin', models.CharField(default=None, max_length=15, unique=True)),
                ('checked_external', models.BooleanField(default=False)),
                ('bank', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='payments.Bank')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Card network (e.g. Visa / MasterCard).
        migrations.CreateModel(
            name='CardCompany',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Card tier (e.g. Classic / Gold / Platinum).
        migrations.CreateModel(
            name='CardLevel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Card kind (e.g. debit / credit).
        migrations.CreateModel(
            name='CardType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Wire BankBin up to the classification models above.
        migrations.AddField(
            model_name='bankbin',
            name='card_company',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='payments.CardCompany'),
        ),
        migrations.AddField(
            model_name='bankbin',
            name='card_level',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='payments.CardLevel'),
        ),
        migrations.AddField(
            model_name='bankbin',
            name='card_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='payments.CardType'),
        ),
        migrations.AddField(
            model_name='paymentpreference',
            name='bank_bin',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='payments.BankBin'),
        ),
    ]
| [
"[email protected]"
] | |
549275e873f106430ed837c7a06752d2258c7bdc | 66a672f802a1d59efaffb9b11dc2f508ccd024e6 | /parse_LN_to_JSON_old.py | 76896c08c8d5f0e0acfb96e9d2a426415a0207d4 | [
"Apache-2.0"
] | permissive | dallascard/LN_tools | 3b7af1a6b064f5b7dc540a04d4fae1a0b2e8f805 | 66be00f1fd11517f7bbf2949cc70f9552f3af4f4 | refs/heads/master | 2021-01-12T16:02:17.130400 | 2019-10-26T00:05:00 | 2019-10-26T00:05:00 | 71,923,543 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,161 | py | """
parse_LN.py
Parse a single file or a directory of raw files from Lexis-Nexis,
which come as text files containing a block of news articles concatenated into one file.
Objective is to split articles into individual files and extract relevant
information
In general, the articles have:
a source (newspaper name)
a well-defined date
sometimes a title after the date
some possible top tags, including author (byline) and length
some paragraphs of text (usually)
many possible end tags (some of which include relvance percentages)
a copyright (usually)
Also, all tags will (usually) be in the form 'TAG: content'
Unfortunately, there is a lot that can go wrong, including missing sections,
broken lines, unusually formats, strangely converted characters,
and randomly copied text. We do the best we can.
"""
# import modules
from optparse import OptionParser
from os import path, makedirs
from json import dump
from unicodedata import normalize
import codecs
import re
import glob
# This function writes an individual article to a text file, unchanged
# Yes, it works purely on global variables...
def write_text_file():
    """Write the current article's raw text, unchanged, to <output_dir>/<prefix><case_id>.txt.

    Operates purely on module-level globals set by the main loop:
    doc (the article dict), output_text (original lines), output_dir, prefix.
    Silently does nothing if the doc has no CASE_ID yet.
    """
    if doc.has_key(u'CASE_ID'):
        output_file_name = output_dir + '/' + prefix + str(doc[u'CASE_ID']) + '.txt'
        output_file = codecs.open(output_file_name, mode='w', encoding='utf-8')
        output_file.writelines(output_text)
        output_file.close()
# This function writes a parsed version of an article as a JSON object
# It too relies on global variables...
def write_json_file():
    """Serialize the current article as JSON to <json_dir>/<prefix><case_id>.json.

    Operates on module-level globals set by the main loop:
    doc, top_tags, end_tags, paragraphs, top_misc, end_misc, json_dir, prefix.
    Silently does nothing if the doc has no CASE_ID yet.
    """
    # assume we have a dictionary named doc
    # it should have a case_id
    if doc.has_key(u'CASE_ID'):
        # get the top tags, and put them in a dictionary
        top = {}
        for t in top_tags:
            # split the tag into TAG and TEXT (at the first colon)
            index = t.find(':')
            tag = t[:index]
            text = t[index+1:]
            # strip off leading whitespace
            text = text.lstrip()
            top[tag] = text
        # store the top tags and anything else from the top section which didn't fit
        top[u'TOP_MISC'] = top_misc
        doc[u'TOP'] = top
        # store the paragraphs of body text in BODY
        doc[u'BODY'] = paragraphs
        # get the bottom tags and put them in a dictionary, as with top tags
        bottom = {}
        for t in end_tags:
            index = t.find(':')
            tag = t[:index]
            text = t[index+1:]
            text = text.lstrip()
            bottom[tag] = text
        bottom[u'BOTTOM_MISC'] = end_misc
        doc[u'BOTTOM'] = bottom
        # output the overall dictionary as a json file
        output_file_name = json_dir + '/' + prefix + str(doc[u'CASE_ID']) + '.json'
        output_file = codecs.open(output_file_name, mode='w', encoding='utf-8')
        dump(doc, output_file, ensure_ascii=False, indent=2)
        output_file.close()
# Tags used at the top and bottom of L-N files
TOP_TAGS = [u'BYLINE:', u'DATELINE:', u'HIGHLIGHT:', u'LENGTH:', u'SECTION:', u'SOURCE:', u'E-mail:', ]
END_TAGS = [u'CATEGORY:', u'CHART:', u'CITY:', u'COMPANY:', u'CORRECTION-DATE:', u'COUNTRY:', u'CUTLINE:', u'DISTRIBUTION:', u'DOCUMENT-TYPE:', u'ENHANCEMENT:', u'GEOGRAPHIC:', u'GRAPHIC:', u'INDUSTRY:', u'JOURNAL-CODE:', u'LANGUAGE:', u'LOAD-DATE:', u'NOTES:', u'ORGANIZATION:', u'PERSON:', u'PHOTO:', u'PHOTOS:', u'PUBLICATION-TYPE:', u'SERIES:', u'STATE:', u'SUBJECT:', u'TICKER:', u'TYPE:', u'URL:']
MONTHS = {u'january':1, u'february':2, u'march':3, u'april':4, u'may':5, u'june':6, u'july':7, u'august':8, u'september':9, u'october':10, u'november':11, u'december':12}
# set up an options parser
usage = 'usage %prog [options] (must specify -f OR -d)'
parser = OptionParser(usage=usage)
parser.add_option('-f', help='read in FILE', metavar='FILE')
parser.add_option('-d', help='read in in ALL files in INDIR', metavar='INDIR')
parser.add_option('-o', help='output individual files to DIR', metavar='DIR', default='./temp/text/')
parser.add_option('-j', help='output individal xml files to JDIR', metavar='JDIR', default='./temp/json/')
parser.add_option('-p', help='prefix for output text files [default = %default]', metavar='PRE', default='prefx.0-')
parser.add_option("-w", action="store_true", dest="write_files", default=False,
                  help="Write individual .txt files [default = %default]")
(options, args) = parser.parse_args()
case_id = 0 # unique id for each article (doc)
total_expected_docs = 0 # total number of articles we expect to get from all L-N files
total_docs_found = 0 # running count of listed numbers of docs
tag_counts = {} # counts of how many times we see each tag
first_tag_counts = {} # counts of how many times we see each tag as the first tag
# If the output directories do not exist, create them
output_dir = options.o
if not path.exists(output_dir):
    makedirs(output_dir)
json_dir = options.j
if not path.exists(json_dir):
    makedirs(json_dir)
# get the prefix to use when naming files
prefix = options.p
# get a list of files to parse, either a single file, or all files in a directory
files = []
input_file_name = options.f
if input_file_name == None:
    input_dir = options.d
    if path.exists(input_dir):
        files = glob.glob(input_dir + '/*')
else:
    files = [input_file_name]
print "Found", len(files), "files."
# sort the files and parse them one by one
files.sort()
for f in files:
    # open the next file, and read it in
    input_file_name = f
    name_parts = input_file_name.split('/')
    orig_file_name = name_parts[-1]
    # open with utf-8-sig encoding to eat the unicode label
    input_file = codecs.open(input_file_name, encoding='utf-8-sig')
    input_text = input_file.read()
    input_file.close()
    # split the text into individual lines (L-N files use CRLF line endings)
    lines = input_text.split('\r\n')
    doc = {} # store the article we are working on as a dictionary
    doc_count = 0 # count of how many articles we have found
    doc_num = 0 # document number in the original L-N file
    expected_docs = 0 # the number of articles we expect to find in this L-N file
    # process each line, one at a time
    for line in lines:
        # first, normalize the unicode (to get rid of things like \xa0)
        orig_line = line
        line = normalize('NFKD', line)
        # start off looking for a new document (each of which is marked as below)
        # also, store the numbers from this pattern as groups for use below
        match = re.search(u'([0-9]+) of ([0-9]+) DOCUMENTS', line)
        # if we find a new article
        if match:
            # first, save the article we are currently working on
            if doc_num > 0:
                if options.write_files:
                    # write the original file as a text file, unmodified
                    write_text_file()
                # also write the (parsed) article as a json object
                write_json_file()
            # now move on to the new article
            # check to see if the document numbering within the L-N file is consistent
            # (i.e. the next document should be numbered one higher than the last)
            if int(match.group(1)) != doc_num + 1:
                print "Missed document after " + input_file_name + ' ' + str(doc_num)
            # if this is the first article in the L-N file, get the expected number of docs
            if expected_docs == 0:
                expected_docs = int(match.group(2))
                total_expected_docs += expected_docs
            elif (expected_docs != int(match.group(2))):
                print "Discrepant document counts after", input_file_name, str(doc_num-1)
            # get the document number from the original L-N file
            doc_num = int(match.group(1))
            # assign a new, unique, case id
            case_id += 1
            # add one to the number of documents we've seen
            doc_count += 1
            # start a new document as a dictionary
            doc = {}
            # store what we know so far
            doc[u'CASE_ID'] = case_id # unique identifier
            doc[u'ORIG_FILE'] = orig_file_name # filename of the original L-N file
            doc[u'ORIG_ID'] = doc_num # document number in the L-N file
            current = u'' # current stores the block we are currently working on
            output_text = [] # a list of lines to write to the text file
            top_tags = [] # a list of top tags
            paragraphs = [] # a list of body paragraphs
            end_tags = [] # a list of end tags
            top_misc = u'' # things we can't parse from the top of the article
            end_misc = u'' # things we can't parse from the bottom of the article
            have_length = False
            have_section = False
        # once we've started working on a document...
        elif (doc_num > 0):
            match = False
            # check if there's anything on this line
            if (line != u''):
                # if so, strip the whitespace and add the current line to our working line
                temp = line.lstrip()
                temp = temp.rstrip()
                current += temp + ' '
            # if not, process the line(s) we've been working on...
            elif (current != u''):
                current = current.rstrip()
                # first check to see if this looks like a tag
                tag_match = re.search(u'^([A-Z]+[-]?[A-Z]+:)', current)
                if tag_match:
                    tag = tag_match.group(1)
                    # if we find a tag, see if it's a top tag
                    if (tag in TOP_TAGS) and (len(paragraphs) == 0):
                        # LENGTH and SECTION are only accepted once per article
                        if tag == u'LENGTH:':
                            if have_length == False:
                                top_tags.append(current)
                                have_length = True
                                match = True
                        elif tag == u'SECTION:':
                            if have_section == False:
                                top_tags.append(current)
                                have_section = True
                                match = True
                        else:
                            top_tags.append(current)
                            match = True
                    # then see if it's a bottom tag
                    elif (tag in END_TAGS) and ((len(paragraphs)>0) or have_section or have_length):
                        # deal with it as an end tag:
                        end_tags.append(current)
                        match = True
                # if it's not a tag, but we already have end tags, continue with the end
                if match == False and len(end_tags) > 0:
                    # deal with this as bottom matter
                    # pick up the copyright if it's there
                    pattern = re.search(u'^Copyright ', current)
                    if pattern:
                        if not doc.has_key(u'COPYRIGHT'):
                            doc[u'COPYRIGHT'] = current
                    # otherwise,
                    else:
                        # sometimes the end tags get split over multiple lines
                        # i.e., if this text contains '(#%)' (a relevance percentage)
                        pattern = re.search(u'\([0-9]+%\)', current)
                        if pattern:
                            end_tags[-1] += ' ' + current
                        # or if the last tag was just a tag with no content
                        elif end_tags[-1] in END_TAGS:
                            end_tags[-1] += ' ' + current
                        # not foolproof... store the rest in misc
                        else:
                            end_misc += current + u' ** '
                    match = True
                # then, check if it could be a date for the article
                if match == False and not doc.has_key(u'DATE'):
                    date_match = re.search('^([a-zA-Z]*).?\s*(\d\d?).*\s*(\d\d\d\d).*', current)
                    month_yyyy_match = re.search('^([a-zA-Z]*).?\s*(\d\d\d\d).*', current)
                    if date_match:
                        month_name = date_match.group(1)
                        month_name = month_name.lower()
                        day = date_match.group(2)
                        year = date_match.group(3)
                        if MONTHS.has_key(month_name):
                            month = MONTHS[month_name]
                            doc[u'DATE'] = current
                            doc[u'MONTH'] = int(month)
                            doc[u'DAY'] = int(day)
                            doc[u'YEAR'] = int(year)
                            # also store the date in the format YYYYMMDD
                            fulldate = year + str(month).zfill(2) + day.zfill(2)
                            doc[u'FULLDATE'] = fulldate
                            match = True
                    # try an alternate date format (month + year, no day)
                    elif month_yyyy_match:
                        month_name = month_yyyy_match.group(1)
                        month_name = month_name.lower()
                        year = month_yyyy_match.group(2)
                        if MONTHS.has_key(month_name):
                            doc[u'DATE'] = current
                            month = MONTHS[month_name]
                            doc[u'MONTH'] = int(month)
                            doc[u'YEAR'] = int(year)
                            match = True
                # if its not a tag or date, and we don't have end tags
                if match == False:
                    # check if we have any top tags
                    if len(top_tags) == 0:
                        # if not, check if we have a date
                        if not doc.has_key(u'DATE'):
                            # if not, assume this is a part of the source
                            source = current.lower()
                            source = re.sub('^the', '', source, 1)
                            source = source.lstrip()
                            if doc.has_key(u'SOURCE'):
                                doc[u'SOURCE'] = doc[u'SOURCE'] + u'; ' + source
                            else:
                                doc[u'SOURCE'] = source
                            match = True
                    # if we do have top tags, assume this is a title
                    else:
                        # assuming we don't already have a title
                        if not doc.has_key(u'TITLE'):
                            doc[u'TITLE'] = current
                            match = True
                # don't move onto the body until we have at least one tag
                if (match == False) and (have_length == False) and (have_section == False):
                    top_misc += current + u' ** '
                    match = True
                # in all other cases, assume this is part of the body
                if match == False:
                    # Try to deal with paragraphs that have been split over multiple lines
                    # By default, assume we'll just append the current working line
                    # to the body
                    append = True
                    # if we have at least one paragraph
                    if len(paragraphs) > 0:
                        # Look at the end of the last paragraph and the start of
                        # this one to see if a line has been split.
                        # First, try to join hyperlinks, email addresses and
                        # hyphenated words that have been split
                        if re.search(u'[/@-]$', paragraphs[-1]):
                            if re.search(u'^[a-z]', current):
                                paragraphs[-1] = paragraphs[-1] + u'' + current
                                append = False
                        # Also search for the symbols at the start of the next line
                        elif re.search(u'^[/@]', current):
                            # NOTE(review): 'u' here inserts a literal "u" between the
                            # joined fragments — looks like a typo for u''; confirm intent.
                            paragraphs[-1] = paragraphs[-1] + 'u' + current
                            append = False
                        # Finally, try to join sentences that have been split
                        # i.e. the last paragraph doesn't end with an end character
                        elif not re.search(u'[\.\"\'?!:_]$', paragraphs[-1]):
                            # and the next paragraph doesn't start with a start symbol.
                            if not re.search(u'^[A-Z"\'>*-\.\(0-9=\$%_]|(http)|(www)', current):
                                paragraphs[-1] = paragraphs[-1] + u' ' + current
                                append = False
                    # in all other cases, just add the input as a new paragraph
                    if (append == True):
                        paragraphs.append(current)
                # start a new working line
                current = u''
        # keep every original line (with CRLF restored) for the raw .txt output
        output_text.append(orig_line + u'\r\n')
    total_docs_found += doc_count
    # once we reach the end of the file, output the current document
    # and then go to the next file
    if doc_num > 0:
        if options.write_files:
            write_text_file()
        write_json_file()
    # print a summary for the L-N file
    print 'Processed', orig_file_name + ': ', 'Expected:', expected_docs, ' Found:', doc_count
# and print a final summary of everything
print 'Total number of documents expected: ' + str(total_expected_docs)
print 'Total number of documents found: ' + str(total_docs_found)
| [
"[email protected]"
] | |
7b10bae824de4ead5ffbb387b689114066ec431d | 5d304c6ec0f01edee73e3b612f84307060c0da54 | /letter_combinations_of_a_phone_number.py | 089f3c0c2377b7e620e80a61fbd5b12517d716e8 | [] | no_license | xartisan/leetcode-solutions-in-python | cfa06b9e02f7ec0446cf6b71df4ea46caa359adc | 7e3929a4b5bd0344f93373979c9d1acc4ae192a7 | refs/heads/master | 2020-03-14T17:10:07.957089 | 2018-07-29T10:11:01 | 2018-07-29T10:11:01 | 131,713,447 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | class Solution:
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
keys = ["", "", "abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"]
rv = []
for d in digits:
d = int(d)
tmp = []
for c in keys[d]:
if rv:
tmp.extend(s + c for s in rv)
else:
tmp.append(c)
rv = tmp
return rv
| [
"[email protected]"
] | |
d3f0f22a9f875992c367e7fce63ee8366b08f220 | 5254c3a7e94666264120f26c87734ad053c54541 | /Revision de Pares/Semana N°5/Caso 2/05-0-gin-fileparse.py-vir-2020-09-08_19.44.49.py | c1df151428fe518671cc730320bf9ea5a29de07f | [] | no_license | ccollado7/UNSAM---Python | 425eb29a2df8777e9f892b08cc250bce9b2b0b8c | f2d0e7b3f64efa8d03f9aa4707c90e992683672d | refs/heads/master | 2023-03-21T17:42:27.210599 | 2021-03-09T13:06:45 | 2021-03-09T13:06:45 | 286,613,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | #fileparse.py
import csv
def parse_csv(nombre_archivo, select = None, types = None, has_headers=True):
    '''
    Parsea un archivo CSV en una lista de registros.

    Parametros:
      nombre_archivo: ruta del archivo CSV a leer.
      select: lista opcional de nombres de columnas a conservar
              (solo tiene efecto cuando has_headers es True).
      types: lista opcional de funciones de conversion, una por columna
             (en el orden de las columnas resultantes).
      has_headers: si es True, la primera fila son encabezados y cada
                   registro es un diccionario; si es False, cada registro
                   es una tupla.

    Devuelve una lista de diccionarios o de tuplas. Las filas vacias se
    saltean.

    BUG FIX: antes, con has_headers=True y sin `select`, el parametro
    `types` se ignoraba silenciosamente; ahora las conversiones se
    aplican siempre que se pasen.
    '''
    with open(nombre_archivo) as f:
        filas = csv.reader(f)
        registros = []
        if has_headers:
            # Lee los encabezados del archivo
            encabezados = next(filas)
            if select:
                # Si se indicó un selector de columnas,
                # buscar los índices de las columnas especificadas.
                indices = [encabezados.index(nombre_columna) for nombre_columna in select]
                encabezados = select
            else:
                indices = []
            for fila in filas:
                if not fila:  # Saltear filas vacías
                    continue
                # Filtrar la fila si se especificaron columnas
                if indices:
                    fila = [fila[index] for index in indices]
                # Aplicar las conversiones de tipo (con o sin select)
                if types:
                    fila = [tipo(valor) for tipo, valor in zip(types, fila)]
                # Armar el diccionario
                registros.append(dict(zip(encabezados, fila)))
        else:
            for fila in filas:
                if not fila:  # Saltear filas vacías
                    continue
                if types:
                    fila = [tipo(valor) for tipo, valor in zip(types, fila)]
                # Agregar la tupla
                registros.append(tuple(fila))
    return registros
#%%
# Demo cells: parse camion.csv with different type-conversion combinations.
camion_1 = parse_csv('camion.csv', types=[str, int, float])
print(camion_1)
#%%
camion_2 = parse_csv('camion.csv', types=[str, str, str])
print(camion_2)
#%%
camion_3 = parse_csv('camion.csv', select = ['nombre', 'cajones'], types=[str, int])
print(camion_3)
#%%
camion_4 = parse_csv('camion.csv', types=[str, str, float])
print(camion_4)
#%%
camion_5 = parse_csv('camion.csv', types=[str, int, str])
print(camion_5)
"[email protected]"
] | |
7cd784758941fdaddfc4f4813a364aa657bacadf | 7f108151d95b49bdcaec90e7b1978859f603d962 | /source/map/map_image.py | c1a0ef2173b42c8f51487752622362033c7b2077 | [] | no_license | thydungeonsean/Shinar | 205b20bf47ace29dde14ef2822449ee31ceeeca0 | 5bbb42aafe4ea1f54a69649a242241c3ca96926c | refs/heads/master | 2021-04-29T10:08:17.300934 | 2017-04-04T12:06:48 | 2017-04-04T12:06:48 | 77,847,893 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,839 | py | from ..constants import *
import pygame
import os
from ..images.image import Image
class MapImageGenerator(object):
    # Singleton that renders a whole map to one pygame Surface, blitting base
    # terrain tiles and then dithered transition edges between terrain types.

    instance = None
    @classmethod
    def get_instance(cls):
        # Lazily create and return the shared singleton instance.
        if cls.instance is not None:
            return cls.instance
        else:
            cls.instance = cls()
            return cls.instance
    """
    The image generator will scan through a map,
    and compile dither tile / wall tile combos
    as needed. It will store them in dither_patterns
    or wall patterns to be reused.
    dither_sets will hold recolored images based on the
    patterns as needed.
    """
    def __init__(self):
        self.tile_images = self.init_tile_images()
        self.dither_gen = DitherImageGenerator()
        # cache: dither_value tuple -> uncolored dither pattern image
        self.dither_patterns = {}
        # self.wall_patterns = {}
        # cache per terrain id: dither_value tuple -> recolored dither image
        self.dither_sets = {
            1: {},
            2: {},
            3: {}
        }
    # init methods
    def init_tile_images(self):
        # Map terrain ids to their base tile images.
        tile_images = {
            0: MapTileImage('desert'),
            1: MapTileImage('plain'),
            2: MapTileImage('fertile'),
            3: MapTileImage('river')
        }
        return tile_images
    def generate_image(self, map):
        # Render the full map: base tiles first, then dithered terrain edges.
        mw = map.w * TILEW
        mh = map.h * TILEH
        map_image = pygame.Surface((mw, mh))
        for y in range(map.h):
            for x in range(map.w):
                tile_id = map.map[x][y]
                # unknown terrain ids fall back to desert (0)
                if tile_id not in self.tile_images.keys():
                    tile_id = 0
                img = self.tile_images[tile_id]
                img.position((x, y))
                img.draw(map_image)
        dithered_edge_maps = self.get_dithered_edges(map)
        for k in (1, 2, 3):
            edge_map = dithered_edge_maps[k]
            for x, y in edge_map.keys():
                img = self.get_dither_image(k, edge_map[(x, y)])
                img.position((x, y))
                img.draw(map_image)
        return map_image
    def get_dithered_edges(self, map):
        # Collect all tiles of each ditherable terrain, then compute the
        # dither overlays each terrain casts onto its neighbouring terrain.
        dithered_ids = (1, 2, 3)
        dither_points = {
            1: [],
            2: [],
            3: []
        }
        for y in range(map.h):
            for x in range(map.w):
                value = map.map[x][y]
                if value in dithered_ids:
                    dither_points[value].append((x, y))
        # plain dithers over desert (0), fertile over plain (1), river over fertile (2)
        plain_dithered_edge = self.get_dithered_edge(map, dither_points[1], 0)
        fertile_dithered_edge = self.get_dithered_edge(map, dither_points[2], 1)
        river_dithered_edge = self.get_dithered_edge(map, dither_points[3], 2)
        #river_dithered_edge = self.get_dithered_edge(map, dither_points[2], 3)
        return {1: plain_dithered_edge,
                2: fertile_dithered_edge,
                3: river_dithered_edge}
    def get_dithered_edge(self, map, points, cover_terrain):
        # For each tile of `cover_terrain` adjacent (incl. diagonals) to one of
        # `points`, record which of its 8 neighbours hold the dithering terrain.
        dither = {}
        visited = set()
        for x, y in points:
            adj = map.get_adj((x, y), diag=True)
            for ax, ay in adj:
                if map.map[ax][ay] == cover_terrain and (ax, ay) not in visited:
                    visited.add((ax, ay))
                    dither[(ax, ay)] = self.get_dither_value(map, map.map[x][y], (ax, ay))
        return dither
    def get_dither_value(self, map, cover_type, (x, y)):
        # Return a sorted tuple of neighbour slots (0-7, clockwise from
        # top-left) whose tile matches cover_type — the dither pattern key.
        edge_coords = ((x-1, y-1), (x, y-1), (x+1, y-1), (x+1, y),
                       (x+1, y+1), (x, y+1), (x-1, y+1), (x-1, y))
        i = 0
        value = set()
        for ex, ey in edge_coords:
            if map.is_on_map((ex, ey)) and map.map[ex][ey] == cover_type:
                value.add(i)
            i += 1
        value = list(value)
        value.sort()
        return tuple(value)
    def get_dither_image(self, terrain, dither_value):
        # Return the recolored dither image for (terrain, dither_value),
        # building and caching the pattern and recoloring as needed.
        dither_set = self.dither_sets[terrain]
        if dither_value in dither_set.keys():
            return dither_set[dither_value]
        if dither_value in self.dither_patterns.keys():
            img = self.dither_gen.recolor_pattern(self.dither_patterns[dither_value], terrain)
            dither_set[dither_value] = img
            return img
        else:
            pattern = self.dither_gen.generate_pattern(dither_value)
            self.dither_patterns[dither_value] = pattern
            img = self.dither_gen.recolor_pattern(pattern, terrain)
            dither_set[dither_value] = img
            return img
class DitherImageGenerator(object):
    # Builds dither-edge tile images by composing rotated segments from a
    # master dither tileset, then recoloring them per terrain.

    # terrain id -> dither color
    color_key = {
        1: PLAIN_BROWN,
        2: FERTILE_GREEN,
        3: RIVER_BLUE
        #3: PLAIN_BROWN
        # 1: RED, 2: RED, 3: RED
    }
    # segment id -> pixel offset of that segment within the dither tileset
    dither_key = {
        'a': (0, 0),
        'b': (TILEW, 0),
        'c': (TILEW*2, 0),
        'd': (0, TILEH),
        'e': (TILEW, TILEH),
        'f': (TILEW*2, TILEH),
        'g': (0, TILEH*2),
        'h': (TILEW, TILEH*2)
    }
    def __init__(self):
        self.dither_tileset = TileImage('dither', colorkey=WHITE)
    def generate_pattern(self, d_value):
        # Compose the (uncolored) dither tile for a dither-value tuple.
        d_img = TileImage(colorkey=WHITE)
        image_instructions = self.parse_dither_value(d_value)
        for d_id, pos in image_instructions:
            self.add_dither_segment(d_img, d_id, pos)
        return d_img
    def recolor_pattern(self, pattern, terrain):
        # Copy the pattern and swap its black pixels for the terrain color.
        img = TileImage(colorkey=WHITE)
        pattern.draw(img)
        color = DitherImageGenerator.color_key[terrain]
        img.recolor(BLACK, color)
        return img
    def parse_dither_value(self, value):
        # Translate a dither-value tuple into (segment id, position) pairs.
        # Positions 1,3,5,7 are cardinal edges; 0,2,4,6 are diagonal corners.
        parsed = set()
        card = {1, 3, 5, 7}
        diag = {0, 2, 4, 6}
        cardinals = []
        for e in value:
            if e in card:
                cardinals.append(e)
        if len(cardinals) < 3:
            for e in value:  # checking for outer diagonal corners
                if e in diag and self.corner_is_isolate(value, e):
                    parsed.add(('b', e))
        elif len(cardinals) == 4:
            # fully surrounded: every edge and corner is dithered
            parsed = [('d', 1), ('d', 3), ('d', 5), ('d', 7),
                      ('c', 0), ('c', 2), ('c', 4), ('c', 6)]
            return parsed
        for e in cardinals:  # check for solid edges
            if self.edge_is_isolate(value, e):
                parsed.add(('a', e))
            else:
                parsed.add(('d', e))
                if self.edge_has_one_adj(value, e, 2):
                    end, connector = self.get_corner_values(value, e)
                    parsed.add(('e', end))
                    parsed.add(('c', connector))
                else:
                    adj = self.get_adj_edges(e, 1)
                    for ae in adj:
                        parsed.add(('c', ae))
        return list(parsed)
    def get_adj_edges(self, e, step):
        # Neighbouring positions of e at distance `step`, wrapping mod 8.
        raw_adj = (e + step, e - step)
        adj = []
        for ae in raw_adj:
            if ae < 0:
                adj.append(ae + 8)
            elif ae > 7:
                adj.append(ae - 8)
            else:
                adj.append(ae)
        return adj
    def get_corner_values(self, value, e):
        # Split the two corners flanking edge e into the terminating corner
        # ('end') and the one joining another edge ('connector').
        corners = self.get_adj_edges(e, 1)
        end = None
        connector = None
        for corner in corners:
            if self.edge_has_one_adj(value, corner, 1):
                end = corner
            else:
                connector = corner
        return end, connector
    def corner_is_isolate(self, value, e):
        # True if neither neighbouring cardinal edge of corner e is dithered.
        adj = self.get_adj_edges(e, 1)
        for ae in adj:
            if ae in value:
                return False
        return True
    def edge_is_isolate(self, value, e):
        # True if neither neighbouring cardinal edge of edge e is dithered.
        adj = self.get_adj_edges(e, 2)
        for ae in adj:
            if ae in value:
                return False
        return True
    def edge_has_one_adj(self, value, e, step):
        # True if at most one neighbour of e (at distance step) is dithered.
        adj = self.get_adj_edges(e, step)
        num = 0
        for ae in adj:
            if ae in value:
                num += 1
        if num < 2:
            return True
        else:
            return False
    def add_dither_segment(self, d_img, d_id, pos):
        # Blit segment d_id at position pos; 'e' corners map to the
        # pre-rotated 'e'-'h' tiles by quadrant.
        if d_id in ('a', 'b', 'c', 'd'):
            self.add_rotated_img(d_img, d_id, pos)
        else:  # d_id is 'e'
            if pos == 0:
                self.add_rotated_img(d_img, 'e', 0)
            elif pos == 2:
                self.add_rotated_img(d_img, 'f', 0)
            elif pos == 4:
                self.add_rotated_img(d_img, 'g', 0)
            elif pos == 6:
                self.add_rotated_img(d_img, 'h', 0)
    def add_rotated_img(self, d_img, d_id, pos):
        # Cut segment d_id from the tileset, rotate it to face position pos,
        # and blit it onto the working dither tile.
        ang_dict = {0: 0, 1: 0, 2: -90, 3: -90, 4: 180, 5: 180, 6: 90, 7: 90}
        img = pygame.Surface((TILEW, TILEH))
        img.fill(WHITE)
        img.set_colorkey(WHITE)
        x, y = DitherImageGenerator.dither_key[d_id]
        img.blit(self.dither_tileset.image, (0, 0), (x, y, TILEW, TILEH))
        img = pygame.transform.rotate(img, ang_dict[pos])
        d_img.blit(img, img.get_rect())
class TileImage(Image):
    # Image positioned on the tile grid rather than in raw pixels.
    def __init__(self, imagename=None, colorkey=None):
        Image.__init__(self, imagename, colorkey)
    def position(self, (x, y)):
        # Place the image at tile coordinate (x, y), honoring pixel offsets.
        self.rect.topleft = ((x * TILEW) + self.x_offset, (y * TILEH) + self.y_offset)
class MapTileImage(TileImage):
    # TileImage variant constructed without a colorkey (opaque terrain tiles).
    def __init__(self, imagename=None):
        Image.__init__(self, imagename)
| [
"[email protected]"
] | |
77646e2ec0616be8c2082741e2ca6efa9902dd3a | ef457162d79be971f52ee96b1891764a2d230e8b | /demo.py | 0b61466993849c1bffa5dd4056ad7be10ebc7073 | [] | no_license | LuoJiaji/modularCNN | f2239f6b4ed378fede4401f6e90d9b1d5acc8c70 | b8591c3924abeccaebfad56289a185f904da8608 | refs/heads/master | 2020-06-18T12:57:59.192061 | 2019-07-11T13:20:08 | 2019-07-11T13:20:08 | 196,309,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,242 | py | import random
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.preprocessing import image
from keras.models import Model, load_model
from keras.layers import Input, Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import RMSprop, SGD
from keras.utils.vis_utils import plot_model
# Load MNIST and scale pixel values into [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Add a trailing channel axis so test images are shaped (N, 28, 28, 1).
x_test = np.expand_dims(x_test, axis = 3)
def get_random_batch(x, y, l, batchsize):
    """Sample a balanced binary batch for one-vs-rest training of class `l`.

    Alternates positive samples (label 1, drawn where y == l) and negative
    samples (label 0, drawn where y != l), batchsize/2 of each, sampled
    with replacement.

    Returns (x_batch, y_batch) as numpy arrays; y_batch is float32.
    """
    # BUG FIX: previously these indexed the module-level global `y_train`
    # instead of the `y` argument, silently ignoring the labels passed in.
    ind_p = np.where(y == l)[0]
    ind_n = np.where(y != l)[0]
    x_batch = []
    y_batch = []
    l_p = len(ind_p)
    l_n = len(ind_n)
    for i in range(int(batchsize/2)):
        ind = random.randrange(l_p)
        x_batch.append(x[ind_p[ind]])
        y_batch.append(1)
        ind = random.randrange(l_n)
        x_batch.append(x[ind_n[ind]])
        y_batch.append(0)
    x_batch = np.array(x_batch)
    y_batch = np.array(y_batch)
    y_batch = y_batch.astype('float32')
    return x_batch, y_batch
# Smoke-test the batch sampler for digit 0.
x_batch, y_batch = get_random_batch(x_train, y_train, 0, 128)
# Build a small binary CNN classifier (28x28x1 -> sigmoid).
input_shape = (28,28,1)
input_data = Input(shape=input_shape)
x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_data)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
x = Flatten(name='flatten')(x)
x = Dense(128, activation='relu', name='fc1')(x)
x = Dense(1, activation='sigmoid', name='fc2')(x)
model = Model(input_data, x)
#model.compile(optimizer='rmsprop', loss='mse', metrics=['accuracy'])
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
#i=3
# Train one one-vs-rest CNN per digit and save each to ./models/.
for i in range(10):
    input_shape = (28,28,1)
    input_data = Input(shape=input_shape)
    x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_data)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(128, activation='relu', name='fc1')(x)
    x = Dense(1, activation='sigmoid', name='fc2')(x)
    model = Model(input_data, x)
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    for it in range(5000):
        x_batch, y_batch = get_random_batch(x_train, y_train, i, 256)
        x_batch = np.expand_dims(x_batch, axis = 3)
        train_loss, train_acc = model.train_on_batch(x_batch, y_batch)
        if it % 100 == 0:
            print('i:', i, 'it:', it, 'loss', train_loss, 'acc', train_acc)
    model.save('./models/ModularCNN_' + str(i) + '.h5')
# Evaluate a single per-digit model on the test set
i=9
model = load_model('./models/ModularCNN_9.h5')
test_label = np.copy(y_test)
test_label[np.where(y_test == i)] = 1
test_label[np.where(y_test != i)] = 0
#x_test = np.expand_dims(x_test, axis = 3)
pre = model.predict(x_test)
pre = pre[:,0]
# Threshold predictions at 0.2 to get binary labels.
pre[np.where(pre < 0.2)] = 0
pre[np.where(pre >= 0.2)] = 1
acc = np.mean(pre == test_label)
# Load all per-digit models for combined (ensemble) evaluation
input_shape = (28,28,1)
input_data = Input(shape=input_shape)
model_0 = load_model('./models/ModularCNN_0.h5')
model_1 = load_model('./models/ModularCNN_1.h5')
model_2 = load_model('./models/ModularCNN_2.h5')
model_3 = load_model('./models/ModularCNN_3.h5')
model_4 = load_model('./models/ModularCNN_4.h5')
model_5 = load_model('./models/ModularCNN_5.h5')
model_6 = load_model('./models/ModularCNN_6.h5')
model_7 = load_model('./models/ModularCNN_7.h5')
model_8 = load_model('./models/ModularCNN_8.h5')
model_9 = load_model('./models/ModularCNN_9.h5')
output_0 = model_0(input_data)
output_1 = model_1(input_data)
output_2 = model_2(input_data)
output_3 = model_3(input_data)
output_4 = model_4(input_data)
output_5 = model_5(input_data)
output_6 = model_6(input_data)
output_7 = model_7(input_data)
output_8 = model_8(input_data)
output_9 = model_9(input_data)
model = Model(inputs = input_data,
outputs=[output_0, output_1, output_2, output_3, output_4,
output_5, output_6, output_7, output_8, output_9])
#plot_model(model, to_file='./models_visualization/modularCNN.pdf',show_shapes=True)
#plot_model(model, to_file='./models_visualization/modularCNN.png',show_shapes=True)
pre = model.predict(x_test)
pre = np.array(pre)
pre = np.squeeze(pre)
pre = pre.T
pre = np.argmax(pre, axis = 1)
acc = np.mean(pre == y_test)
## 未知数据测试
img = image.load_img('./dataset/img/G/Q2Fsdmlub0hhbmQudHRm.png', target_size=(28, 28))
img = image.img_to_array(img)
img = img/255
img = img[:,:,0]
plt.imshow(img)
img = np.expand_dims(img, axis=0)
img = np.expand_dims(img, axis=3)
pre = model.predict(img)
pre = np.array(pre)
pre = np.squeeze(pre)
img_rand = np.random.rand(1,28,28,1)
pre = model.predict(img)
pre = np.array(pre)
pre = np.squeeze(pre) | [
"[email protected]"
] | |
613558e1f0a6f4199d62e2feae12a2ba06b09eba | 66e45a2760db8a1fc580689586806c2e3cce0517 | /pymontecarlo/options/model/base.py | 8951f563fdc581a862298aeec9784c0e6a2631d2 | [] | no_license | arooney/pymontecarlo | 4b5b65c88737de6fac867135bc05a175c8114e48 | d2abbb3e9d3bb903ffec6dd56472470e15928b46 | refs/heads/master | 2020-12-02T18:01:42.525323 | 2017-05-19T16:44:30 | 2017-05-19T16:44:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | """
Base models.
"""
# Standard library modules.
import abc
import enum
# Third party modules.
# Local modules.
from pymontecarlo.options.base import Option
# Globals and constants variables.
class ModelMeta(enum.EnumMeta, abc.ABCMeta):
    """Metaclass merging :class:`enum.EnumMeta` and :class:`abc.ABCMeta`.

    Required so a class can inherit from both an abstract base (whose
    metaclass is ``ABCMeta``) and :class:`enum.Enum` without a metaclass
    conflict.
    """
    pass
class Model(Option, enum.Enum, metaclass=ModelMeta):
    """Base class for model options.

    Concrete models are enum members whose value carries a human-readable
    ``fullname`` and an optional literature ``reference``.
    """

    def __init__(self, fullname, reference=''):
        # Enum calls __init__ once per member, unpacking the member's
        # value tuple into these attributes.
        self.fullname = fullname
        self.reference = reference

    def __eq__(self, other):
        # NOTE: Option requires __eq__ to be implemented, but for enum
        # members the identity-based equality from Enum is the right one.
        return enum.Enum.__eq__(self, other)

    def __str__(self):
        return self.fullname
| [
"[email protected]"
] | |
bbb0e5789cc95e133b10dc78292d1330aa319f50 | 09d349155446f2f32519cfc7deb7f79b1138a158 | /kraft/actions.py | d7a5fba1e5bcf34353359243e9c51f253c87c7e3 | [] | no_license | marcin-/pardususer.de | 632d7fb4c5a9252dbcf82711a5da126523d3b8e8 | 1d4bb1d1f9da113cf2b8cbcc6b544ec9b9616862 | refs/heads/master | 2016-09-05T23:22:38.726769 | 2012-10-08T20:40:39 | 2012-10-08T20:40:39 | 6,114,809 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from pisi.actionsapi import cmaketools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
    # Out-of-source CMake build: configure from the parent directory into
    # a fresh build/ tree.
    shelltools.makedirs("build")
    shelltools.cd("build")
    # Release build; NDEBUG / QT_NO_DEBUG strip assertions and Qt debug code.
    cmaketools.configure("-DCMAKE_INSTALL_PREFIX=/usr \
                          -DCMAKE_BUILD_TYPE=Release \
                          -DCMAKE_CXX_FLAGS_RELEASE:STRING='-DNDEBUG -DQT_NO_DEBUG' \
                          -DCMAKE_C_FLAGS_RELEASE:STRING='-DNDEBUG'", sourceDir="..")
def build():
    """Compile the project inside the previously configured build tree."""
    build_dir = "build"
    shelltools.cd(build_dir)
    cmaketools.make()
def install():
    # Install from the build tree, then return to the source root so the
    # documentation file names below resolve relative to it.
    shelltools.cd("build")
    cmaketools.install()
    shelltools.cd("..")
    pisitools.dodoc("TODO", "Changes.txt", "INSTALL", "README", "COPYING", "Releasenotes.txt", "AUTHORS")
| [
"[email protected]"
] | |
5005cb8e54066070f254014fede0db6ecb90ed09 | b6df7cda5c23cda304fcc0af1450ac3c27a224c1 | /nlp/preprocessing.py | 441402923997d2e7d7041d50ca10938068282e69 | [] | no_license | vieira-rafael/py-search | 88ee167fa1949414cc4f3c98d33f8ecec1ce756d | b8c6dccc58d72af35e4d4631f21178296f610b8a | refs/heads/master | 2021-01-21T04:59:36.220510 | 2016-06-20T01:45:34 | 2016-06-20T01:45:34 | 54,433,313 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 287 | py |
class PreProcessing:
    """Holds the stop-word list used to filter tokens before searching."""

    def __init__(self):
        # FIX: the original bound this list to a local variable, so it was
        # discarded as soon as __init__ returned; store it on the instance
        # so callers can actually use it.
        self.stopwords = ["and","del","from","not","while","as","elif","global","or","with","assert","else","if","pass","yield","break","except","import","print","class","exec","in","raise","continue","finally","is","return","def","for","lambda","try"]
| [
"[email protected]"
] | |
d425739853edd3970661241960467b810be5829e | ab5731ae6e190a9b44b1cddbd11af89277302de9 | /read_json/data_json.py | 686c168a2ed574d935bcf65b3bbd202919f755d4 | [] | no_license | MachineLP/py_workSpace | e532781aab51c54a87602c387acd3199f9a75140 | 7937f3706e8d2d8a0e25ba0648bee6d1fcb27234 | refs/heads/master | 2021-08-29T02:56:02.415509 | 2021-08-23T10:38:59 | 2021-08-23T10:38:59 | 117,516,956 | 22 | 18 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | # -*- coding: utf-8 -*-
"""
Created on 2017 10.17
@author: liupeng
"""
import sys
import numpy as np
import json as js
class load_image_from_json(object):
    """Loads annotation records from a JSON file.

    Usage: ``load_image_from_json(path).js_load()`` returns the parsed
    JSON content (a list of per-image dicts in the calling script).
    """

    def __init__(self, json_file):
        # Path to the JSON annotation file.
        self.json_file = json_file

    def __del__(self):
        pass

    def js_load(self):
        """Parse and return the JSON content of ``self.json_file``.

        FIX: the original leaked the file handle; a context manager now
        guarantees it is closed even if parsing raises.
        """
        with open(self.json_file, 'r') as f:
            return js.load(f)
if __name__ == "__main__":
all_data = load_image_from_json('0(6015).json').js_load()
for data in all_data:
print (data['image_id'])
print (data['keypoint']['human1']) | [
"[email protected]"
] | |
b35422cbf3d8501bfd9d006f2035134b3d022010 | 327a8fe2743bde7f49b19914e4d62091cb7c79d6 | /upload/wsgi.py | d97d7643e5921ed05ee7ec9f48320185ec321262 | [
"MIT"
] | permissive | danrneal/raft-drf-exercise | 3de78d115e02a3739911feb30e1b96f482b873e0 | f62d2f05cd085f7a8d9b89f4ecee2c76feb4b47e | refs/heads/main | 2023-08-03T17:04:14.583022 | 2021-09-22T19:53:08 | 2021-09-22T19:53:08 | 312,690,985 | 0 | 0 | MIT | 2021-09-22T19:53:09 | 2020-11-13T21:47:48 | Python | UTF-8 | Python | false | false | 389 | py | """
WSGI config for upload project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'upload.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
d87050e9f0620d49c9b7e96014c4fa531605ba4a | 64ab5b65afdf8d950c4b56ad2259133b95fc2fec | /zeus/migrations/e373a7bffa18_unique_build_failures.py | c118ddf8f86aee0ea630a9b38be70d3beae61969 | [
"Apache-2.0"
] | permissive | getsentry/zeus | 3e88895443b23278fdb4c25121422ee214630512 | 6d4a490c19ebe406b551641a022ca08f26c21fcb | refs/heads/master | 2023-09-01T14:20:11.396306 | 2021-04-30T17:08:33 | 2021-04-30T17:08:33 | 96,131,433 | 222 | 27 | Apache-2.0 | 2022-06-01T03:17:16 | 2017-07-03T16:39:35 | Python | UTF-8 | Python | false | false | 897 | py | """unique_build_failures
Revision ID: e373a7bffa18
Revises: 54bbb66a65a6
Create Date: 2020-03-13 09:25:38.492704
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "e373a7bffa18"
down_revision = "54bbb66a65a6"
branch_labels = ()
depends_on = None
def upgrade():
    # First delete duplicate (build_id, reason) rows, keeping the row with
    # the smallest id, so the unique index below can be created.
    connection = op.get_bind()
    connection.execute(
        """
        DELETE FROM failurereason a
        USING failurereason b
        WHERE a.id > b.id
        AND a.reason = b.reason
        AND a.build_id = b.build_id
        """
    )
    # Partial unique index: a build may record each failure reason only
    # once among rows not tied to a specific job (job_id IS NULL).
    op.create_index(
        "unq_failurereason_buildonly",
        "failurereason",
        ["build_id", "reason"],
        unique=True,
        postgresql_where=sa.text("job_id IS NULL"),
    )
def downgrade():
    """Drop the partial unique index added by this revision."""
    index_name = "unq_failurereason_buildonly"
    op.drop_index(index_name, table_name="failurereason")
| [
"[email protected]"
] | |
a1ebf96d93a3e1ae78d6189b078630bb4fcf8d52 | 7f90f49237b30e404161b4670233d023efb7b43b | /第二章 python核心/HX02_linux系统编程/01进程/test/jc10_子进程多种方式小结.py | a42c62f02979b3b07ae8548d92ebb3d3b86fd1b6 | [] | no_license | FangyangJz/Black_Horse_Python_Code | c5e93415109699cc42ffeae683f422da80176350 | 34f6c929484de7e223a4bcd020bc241bb7201a3d | refs/heads/master | 2020-03-23T01:52:42.069393 | 2018-07-14T12:05:12 | 2018-07-14T12:05:12 | 140,942,688 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# author: Fangyang time:2018/3/31
# (1) fork, 只用于linux (不推荐)
ret = os.fork()
if ret == 0:
# 子进程
else:
# 父进程
# (2) Process(target=xxx), 还有一个 class Xxx(Process):
p1 = Process(target=func)
p1.start()
# 主进程也能干点活
# (3) pool (推荐)
pool = Pool(3)
pool.apply_async(xxxx)
# 主进程一般用来等待, 不干活, 真正的任务在子进程中执行 | [
"[email protected]"
] | |
ac4b87c2ef8d46c4149984f849a04f5e20b3fc0e | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /third_party/catapult/telemetry/telemetry/timeline/sample.py | 806f60fafa2635a581485698ceee0eed38121471 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 713 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.event as timeline_event
class Sample(timeline_event.TimelineEvent):
  """A Sample represents a sample taken at an instant in time
  plus parameters associated with that sample.

  NOTE: The Sample class implements the same interface as
  Slice. These must be kept in sync.

  All time units are stored in milliseconds.
  """
  def __init__(self, parent_thread, category, name, timestamp, args=None):
    # A sample is instantaneous, so the event duration is fixed at 0 ms.
    super(Sample, self).__init__(
        category, name, timestamp, 0, args=args)
    # Thread on whose timeline this sample was recorded.
    self.parent_thread = parent_thread
| [
"[email protected]"
] | |
a0d9c35a415b9dd7d28d35a0995ae5dc81209c6a | 4dd1d8fa59e20061e2c12e540fc52b1b305e575b | /source/sims/s89/plt-exact-sol.py | 0d82396776954a630e3f77a1be11e7c2991767ef | [
"MIT"
] | permissive | ammarhakim/ammar-simjournal | f63521906a97d55ab290a5960d94758139944c89 | 5019f4723e20db80a20db6f2bd454c2fd3241412 | refs/heads/master | 2023-06-08T08:18:11.722779 | 2023-06-02T15:06:43 | 2023-06-02T15:06:43 | 204,050,516 | 3 | 3 | null | 2022-02-01T16:53:13 | 2019-08-23T18:28:44 | Lua | UTF-8 | Python | false | false | 765 | py | import pylab
import tables
import math
import numpy
def exactSol(a, b, X):
    """Exact solution of the model problem, evaluated at X.

    The linear coefficient is chosen so the solution vanishes at X = 0
    and X = 1 (homogeneous Dirichlet boundary conditions); works on
    scalars or numpy arrays alike.
    """
    linCoeff = -(1 / 2.0 + a / 12.0 + b / 30.0)
    quadratic = X ** 2 / 2
    quartic = a * X ** 4 / 12
    sextic = b * X ** 6 / 30
    return quadratic + quartic + sextic + linCoeff * X
fh = tables.openFile("s89-poisson-o3-1d_phi.h5")
q = fh.root.StructGridField
nx, nc = q.shape
Xf = pylab.linspace(0, 1, nx)
qe = q[:,0]
dx = Xf[1]-Xf[0]
Xm = pylab.linspace(0.5*dx, 1-0.5*dx, nx-1)
qm = q[:-1,1]
a = 2.0
b = -12.0
Xhr = pylab.linspace(0, 1, 101)
fhr = exactSol(a, b, Xhr)
# make plot comparing exact to numerical solution
pylab.plot(Xhr, fhr, '-r', Xf, qe, 'ok', Xm, qm, 'ok')
# compute error
fex_e = exactSol(a, b, Xf)
fex_m = exactSol(a, b, Xm)
error = (numpy.abs(fex_e-qe).sum() + numpy.abs(fex_m-qm).sum())/(nx+nx-1);
print "%g %g" % (dx, error)
pylab.show()
| [
"[email protected]"
] | |
5d892f45bb5ed49a45551cf2fc71ed94bdb0fec8 | 91365d8ef539a9952f048e1fef03b6f76a0ccf60 | /test/inductor/test_torchinductor.py | 53d0bc4f376ee0abfd531abf66ebe0ea8c747a09 | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | ppwwyyxx/pytorch | 6e68cd816e8197e298c50d7f0e82cc97aff4dbdf | 2883cb464810611c5de37b2ca06771582ddf5f83 | refs/heads/master | 2023-08-10T00:39:48.165007 | 2023-06-07T01:51:59 | 2023-06-07T01:51:59 | 160,557,191 | 3 | 3 | NOASSERTION | 2018-12-05T17:53:38 | 2018-12-05T17:53:37 | null | UTF-8 | Python | false | false | 208,514 | py | # Owner(s): ["module: inductor"]
import contextlib
import copy
import dataclasses
import functools
import importlib
import itertools
import math
import os
import random
import subprocess
import sys
import time
import typing
import unittest
import weakref
from typing import Tuple
from unittest.mock import patch
import numpy as np
import torch
import torch._dynamo
import torch.nn as nn
from torch._dispatch.python import enable_python_dispatcher
from torch._dynamo.testing import rand_strided, same
from torch._inductor.codegen.common import DataTypePropagation, OptimizationContext
from torch._inductor.utils import run_and_get_code, run_and_get_triton_code
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn import functional as F
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM80OrLater
from torch.testing._internal.common_device_type import _has_sufficient_memory
from torch.testing._internal.common_dtype import all_types
from torch.testing._internal.common_utils import (
DeterministicGuard,
IS_CI,
IS_MACOS,
IS_WINDOWS,
IS_X86,
skipIfRocm,
TEST_WITH_ASAN,
TestCase as TorchTestCase,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_flatten, tree_unflatten
if IS_WINDOWS and IS_CI:
sys.stderr.write(
"Windows CI does not have necessary dependencies for test_torchinductor yet\n"
)
if __name__ == "__main__":
sys.exit(0)
raise unittest.SkipTest("requires sympy/functorch/filelock")
importlib.import_module("functorch")
importlib.import_module("filelock")
from torch._inductor import config, test_operators
from torch._inductor.compile_fx import compile_fx, compile_fx_inner
from torch._inductor.utils import has_torchvision_roi_align
from torch.testing._internal.common_utils import slowTest
from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
HAS_MULTIGPU = HAS_CUDA and torch.cuda.device_count() >= 2
HAS_AVX2 = "fbgemm" in torch.backends.quantized.supported_engines
aten = torch.ops.aten
requires_cuda = functools.partial(unittest.skipIf, not HAS_CUDA, "requires cuda")
requires_multigpu = functools.partial(
unittest.skipIf, not HAS_MULTIGPU, "requires multiple cuda devices"
)
skip_if_x86_mac = functools.partial(
unittest.skipIf, IS_MACOS and IS_X86, "Does not work on x86 Mac"
)
vec_dtypes = [torch.float, torch.bfloat16]
def run_fw_bw_and_get_code(fn):
    """Run ``fn`` forward and backward, returning its result plus the
    generated source code captured by ``run_and_get_code``."""

    def _forward_and_backward():
        out = fn()
        out.sum().backward()
        return out

    return run_and_get_code(_forward_and_backward)
class TestCase(TorchTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._stack = contextlib.ExitStack()
cls._stack.enter_context(
config.patch(
{
"debug": True,
"debug_index_asserts": True,
"cpp.min_chunk_size": 1,
"triton.autotune_pointwise": False, # too slow
"implicit_fallbacks": False,
"generate_intermediate_hooks": True,
}
)
)
@classmethod
def tearDownClass(cls):
cls._stack.close()
super().tearDownClass()
def setUp(self):
torch._dynamo.reset()
super().setUp()
self._start = time.perf_counter()
def tearDown(self):
super().tearDown()
torch._dynamo.reset()
if os.environ.get("ERROR_ON_SLOW") == "1":
elapsed = time.perf_counter() - self._start
assert elapsed < 120
class ToTuple(torch.nn.Module):
    """Identity module that wraps its single input in a one-element tuple."""

    def forward(self, x):
        # Tuple-ify so downstream code exercising tuple outputs is covered.
        return (x,)
@dataclasses.dataclass
class InputGen:
    """Factory for test tensors of various layouts on a given device."""

    n: int
    device: str

    def dense(self):
        # Contiguous square matrix.
        return torch.randn((self.n, self.n), device=self.device)

    def transposed(self):
        # Non-contiguous view of a dense matrix.
        return self.dense().transpose(0, 1)

    def strided(self):
        # Offset, strided slice of a larger allocation.
        base = torch.randn((self.n * 2, self.n * 3), device=self.device)
        return base[self.n :, self.n :: 2]

    def broadcast1(self):
        # 1-D vector broadcastable against dense().
        return torch.randn((self.n,), device=self.device)

    def broadcast2(self):
        # Singleton outer/inner dims for broadcasting.
        return torch.randn((1, self.n, 1), device=self.device)

    def broadcast3(self):
        # One-element (scalar-like) tensor.
        return torch.randn((1,), device=self.device)

    def double(self):
        # Float64 variant of dense().
        return torch.randn((self.n, self.n), device=self.device, dtype=torch.double)

    def int(self):
        # Integer ramp 0..n-1.
        return torch.arange(self.n, device=self.device, dtype=torch.int32)
def compute_grads(args, kwrags, results, grads):
    """Backprop ``grads`` through ``results`` and return gradients w.r.t.
    every requires-grad leaf tensor found in ``args``/``kwrags``.

    ``grads`` must align with the differentiable entries of ``results``.
    (NOTE(review): ``kwrags`` is a long-standing typo for ``kwargs``; kept
    as-is since it is part of the signature.)
    """

    def gather_leaf_tensors(args, kwargs):
        # Flatten arbitrary pytrees and keep only tensors requiring grad.
        args, _ = tree_flatten(args)
        kwargs, _ = tree_flatten(kwargs)
        args = args + kwargs
        leaf_tensors = [
            arg for arg in args if isinstance(arg, torch.Tensor) and arg.requires_grad
        ]
        return leaf_tensors

    flat_results, _ = tree_flatten(results)
    flat_diff_results = [r for r in flat_results if r.requires_grad]
    assert len(flat_diff_results) > 0

    leaf_tensors = gather_leaf_tensors(args, kwrags)
    assert len(leaf_tensors) > 0
    # retain_graph=True: the caller may differentiate the same graph again;
    # allow_unused=True: some leaves may not influence the outputs at all.
    return torch.autograd.grad(
        flat_diff_results,
        leaf_tensors,
        grads,
        allow_unused=True,
        retain_graph=True,
    )
def clone_preserve_strides(x, device=None):
    """Deep-copy tensor ``x`` (optionally onto ``device``) keeping its exact
    size, strides and storage offset, instead of compacting the layout as a
    plain ``clone()`` would."""
    # Non-tensors pass through unchanged.
    if not isinstance(x, torch.Tensor):
        return x
    # View the entire underlying storage as a flat 1-D tensor so the copy
    # includes every element, even ones x's strides skip over.
    buffer = torch.as_strided(
        x, (x.untyped_storage().size() // x.element_size(),), (1,), 0
    )
    if not device:
        buffer = buffer.clone()
    else:
        buffer = buffer.to(device, copy=True)
    # Re-create x's exact size/stride/offset on top of the copied storage.
    out = torch.as_strided(buffer, x.size(), x.stride(), x.storage_offset())
    return out
@patch.object(config, "debug", True)
def run_and_get_cpp_code(fn, *args, **kwargs):
torch._dynamo.reset()
import io
import logging
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
from torch._inductor.graph import output_code_log
output_code_log.addHandler(ch)
prev_level = output_code_log.level
output_code_log.setLevel(logging.DEBUG)
fn(*args, **kwargs)
s = log_capture_string.getvalue()
output_code_log.setLevel(prev_level)
output_code_log.removeHandler(ch)
return s
def check_model(
self: TestCase,
model,
example_inputs,
kwargs=None,
*,
atol=None,
rtol=None,
check_lowp=True,
exact_dtype=True,
nopython=True,
copy_to_cuda=True,
reference_in_float=True,
assert_equal=True,
check_gradient=False,
):
kwargs = kwargs or {}
torch._dynamo.reset()
ref_inputs = [clone_preserve_strides(x) for x in example_inputs]
ref_kwargs = kwargs
has_lowp_args = False
original_lowp_dtype = torch.half
if reference_in_float:
# check_lowp is ignored here, it's kept just to be able to call `common` with extra arg
def upcast_fn(x):
nonlocal has_lowp_args
if isinstance(x, torch.Tensor) and (
x.dtype == torch.float16 or x.dtype == torch.bfloat16
):
has_lowp_args = True
return x.float()
else:
return x
def get_original_lowp_dtype(example_inputs):
dtypes = [x.dtype for x in example_inputs if isinstance(x, torch.Tensor)]
dtype_set = set(dtypes)
return dtype_set.pop() if len(dtype_set) == 1 else torch.half
ref_inputs = list(map(upcast_fn, example_inputs))
ref_kwargs = {k: upcast_fn(v) for k, v in kwargs.items()}
if has_lowp_args:
original_lowp_dtype = get_original_lowp_dtype(example_inputs)
if hasattr(model, "to"):
model = model.to(torch.float)
torch.manual_seed(0)
correct = model(*ref_inputs, **ref_kwargs)
# downcast the model back if needed
if reference_in_float and has_lowp_args:
if hasattr(model, "to"):
model = model.to(original_lowp_dtype)
torch._inductor.metrics.reset()
called = False
def compile_fx_wrapper(model_, example_inputs_):
nonlocal called
called = True
return compile_fx(model_, example_inputs_)
def run(*ex, **kwargs):
return model(*ex, **kwargs)
run = torch._dynamo.optimize(compile_fx_wrapper, nopython=nopython)(run)
torch.manual_seed(0)
actual = run(*example_inputs, **kwargs)
# if not called:
# exp = torch._dynamo.explain(run, *example_inputs)
# print("Explain:", exp[0])
# for graph in exp[2]:
# print("Graph", graph)
assert called, "Ran graph without calling compile_fx"
assert type(actual) == type(correct)
correct_flat, correct_spec = tree_flatten(correct)
actual_flat, _ = tree_flatten(actual)
if reference_in_float:
correct_flat = tuple(
y.to(x.dtype)
if isinstance(y, torch.Tensor) and y.dtype.is_floating_point
else y
for x, y in zip(actual_flat, correct_flat)
)
correct = tree_unflatten(correct_flat, correct_spec)
if assert_equal:
self.assertEqual(
actual,
correct,
atol=atol,
rtol=rtol,
equal_nan=True,
exact_dtype=exact_dtype,
)
# In case of input mutations, check that inputs are the same
self.assertEqual(
ref_inputs,
example_inputs,
atol=atol,
rtol=rtol,
equal_nan=True,
# our testing sometimes uses higher precision inputs for the reference
exact_dtype=False,
)
else:
for correct_val, actual_val in zip(correct_flat, actual_flat):
if isinstance(correct_val, torch.Tensor):
assert correct_val.device == actual_val.device
assert correct_val.size() == actual_val.size()
assert correct_val.stride() == actual_val.stride()
assert correct_val.layout == actual_val.layout
if exact_dtype:
assert correct_val.dtype == actual_val.dtype
if check_gradient:
# generate random unit norm gradients
grads = [
torch.rand(r.shape, device=r.device, dtype=r.dtype)
for r in correct_flat
if r.requires_grad
]
for g in grads:
g /= g.norm()
correct_grad = compute_grads(ref_inputs, ref_kwargs, correct, grads)
flat_grads, _ = tree_flatten(correct_grad)
all_none_grads = all(x is None for x in flat_grads)
if all_none_grads:
# See Note [Detaching inputs that never need gradients]
# There are a handful of ops that can return None gradients, into of zero gradients.
# If all inputs to an AOTAutograd graph are supposed to get None gradients,
# AOTAutograd will end up forcing all of the outputs of the forward to not require grad.
# There's no easy fix to this (see the note above), although one option is to
# force any derivative formulas in core to return tensors of zeros instead of None.
flat_results, _ = tree_flatten(actual)
results_that_require_grad = [
x
for x in flat_results
if isinstance(x, torch.Tensor) and x.requires_grad
]
self.assertEqual(len(results_that_require_grad), 0)
else:
actual_grad = compute_grads(example_inputs, kwargs, actual, grads)
self.assertEqual(
actual_grad,
correct_grad,
atol=atol,
rtol=rtol,
equal_nan=True,
exact_dtype=exact_dtype,
)
torch._dynamo.reset()
@torch._inductor.config.patch("triton.cudagraphs", False)
def check_model_cuda(
self: TestCase,
model,
example_inputs,
kwargs=None,
*,
atol=None,
rtol=None,
check_lowp=True,
exact_dtype=True,
nopython=True,
copy_to_cuda=True,
reference_in_float=True,
assert_equal=True,
check_gradient=False,
):
kwargs = kwargs or {}
if hasattr(model, "to"):
model = model.to("cuda")
if copy_to_cuda:
example_inputs = tuple(
clone_preserve_strides(x, device="cuda") for x in example_inputs
)
check_model(
self,
model,
example_inputs,
kwargs,
atol=atol,
rtol=rtol,
exact_dtype=exact_dtype,
nopython=nopython,
reference_in_float=reference_in_float,
assert_equal=assert_equal,
check_gradient=check_gradient,
)
if check_lowp:
def downcast_fn(x):
if not isinstance(x, torch.Tensor) or not x.dtype == torch.float:
return x
return torch.empty_strided(
x.size(), x.stride(), device="cuda", dtype=torch.half
).copy_(x)
example_inputs = list(map(downcast_fn, example_inputs))
if hasattr(model, "to"):
model = model.to(torch.half)
if rtol is not None:
rtol = max(2e-3, rtol)
check_model(
self,
model,
example_inputs,
kwargs,
atol=atol,
rtol=rtol,
exact_dtype=exact_dtype,
nopython=nopython,
reference_in_float=reference_in_float,
assert_equal=assert_equal,
check_gradient=check_gradient,
)
def _run_and_assert_no_indirect_indexing(test_case, func, *args, **kwargs):
result, source_codes = run_and_get_code(func, *args, **kwargs)
for code in source_codes:
for line in code.split("\n"):
stmt = None
# Find indexing expressions
if ".load(" in line:
stmt = line.split(".load")[-1]
elif "tl.store" in line:
stmt = line.split(".store")[-1]
stmt = ",".join(stmt.split(",")[:-2]) # Remove store value and mask
elif ".store" in line:
stmt = line.split(".store")[-1]
elif "[" in line:
stmt = line.split("[")[-1].split("]")[0]
if stmt is None:
continue
# indirect indexing involves a `tmp` variable
test_case.assertTrue(
"tmp" not in stmt,
msg=f"Found indirect indexing in statement '{stmt}' from code:\n{code}",
)
return result
class SweepInputs2:
input_gen_types1 = [
"dense",
"transposed",
"strided",
"broadcast1",
"broadcast2",
"broadcast3",
"double",
"int",
]
input_gen_types2 = input_gen_types1
gen = None
@staticmethod
def kernel(a, b):
return (a + b,)
@classmethod
def gen_template(cls, name1, name2):
def test(self):
check_model(
self,
cls.kernel,
(
getattr(cls.gen, name1)(),
getattr(cls.gen, name2)(),
),
)
test.__name__ = f"test_{cls.gen.device}_{name1}_{name2}"
setattr(cls, test.__name__, test)
@classmethod
def populate(cls):
for name1 in cls.input_gen_types1:
for name2 in cls.input_gen_types2:
cls.gen_template(name1, name2)
class CommonTemplate:
def test_bool(self):
def fn(a, b):
return (
a + b,
a * b,
a & b,
a | b,
a ^ b,
torch.logical_and(a, b),
torch.logical_or(a, b),
torch.logical_not(a),
torch.sign(b),
)
self.common(
fn,
(
torch.tensor([True, False, True, False]),
torch.tensor([False, False, True, True]),
),
)
def test_add_const_int(self):
def fn(a):
return (a + 1, torch.add(a, 1, alpha=2))
self.common(fn, (torch.randn(32),))
def test_add_const_float(self):
def fn(a):
return (a + 1.5,)
self.common(fn, (torch.randn(32),))
def test_add_inplace_permuted(self):
def fn(x, y):
return x.add_(y)
x = torch.ones([2, 12, 13, 17]).transpose(1, 2)
y = torch.randn([2, 13, 1, 17])
self.common(fn, (x, y))
def test_concat_add_inplace(self):
def fn(x, y, z):
return torch.cat([x, y], dim=1).add_(z)
x = torch.randn([2, 12, 14, 14])
y = torch.randn([2, 12, 14, 14])
z = torch.randn([2, 24, 14, 14])
self.common(fn, (x, y, z))
def test_abs(self):
def fn(a):
return (a / (torch.abs(a) + 1),)
self.common(fn, (torch.randn(17),))
def test_sgn(self):
def fn(a):
return torch.sgn(a), torch.sgn(a + 1) - 1
self.common(fn, [torch.linspace(-10, 10, 41)])
def test_randn_generator(self):
def fn(a, generator):
torch.randn([20, 20], generator=generator, device=a.device)
self.common(fn, (torch.linspace(-10, 10, 41), None))
# generator not yet supported in dynamo
with self.assertRaisesRegex(torch._dynamo.exc.Unsupported, "Generator"):
self.common(fn, (torch.linspace(-10, 10, 41), torch.Generator(self.device)))
def test_sgn_extremal(self):
def fn(a):
return (torch.sgn(a),)
self.common(fn, [torch.tensor([np.nan, np.inf, -np.inf, 0])])
def test_max_min(self):
def fn(a, b):
return (torch.maximum(a, b), torch.minimum(a, b))
self.common(fn, (torch.randn(8), torch.randn(8)))
t1 = torch.randn(8)
t1[0] = float("nan")
t2 = torch.randn(8)
t2[1] = float("nan")
self.common(fn, (t1, t2))
def test_neg_max_uint8(self):
# https://github.com/pytorch/pytorch/issues/93380
def fn(a, b):
c = torch.neg(a)
return torch.maximum(b, c)
a = torch.randint(256, (1,), dtype=torch.uint8)
b = torch.randint(256, (8390,), dtype=torch.uint8)
self.common(fn, (a, b))
def test_compar(self):
def fn(x):
return x.gt(3.5), x.ge(3.5), x.eq(3.5), x.le(2.5), x.lt(3.5), x.ne(3.5)
a = torch.tensor([3])
self.common(fn, (a,))
def test_horizonal_fusion1(self):
def fn(a, b, c):
return (a + b, a - c, b * c)
self.common(
fn, (torch.randn(8, 16, 16), torch.randn(8, 16, 16), torch.randn(1, 16, 1))
)
def test_horizonal_fusion2(self):
def fn(a, b, c):
return a + 1, b + 2, c + 3
self.common(fn, (torch.randn(8, 16, 8), torch.randn(8, 16), torch.randn(16, 8)))
def test_vertical_fusion1(self):
def fn(sa, ct, p):
# From torchbench.pyhpc_equation_of_state
v17 = -3.087032500374211e-7
v18 = -1.988366587925593e-8
v19 = -1.061519070296458e-11
v20 = 1.550932729220080e-10
t15 = v19 * ct
t19 = v17 + ct * (v18 + t15) + v20 * sa
t20 = 1.0 / t19
t128 = t19 * p
return t20 + t128
self.common(
fn,
(
torch.randn(204, 204, 26),
torch.randn(204, 204, 26),
torch.randn(26),
),
)
self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
def test_forced_buffer_realize(self):
# Test torch._test_inductor_realize forces a buffer to be realized
def fn(a):
b = test_operators.realize(a * 2)
return (b * 2,)
self.common(fn, (torch.randn(10),))
self.assertEqual(torch._inductor.metrics.ir_nodes_pre_fusion, 2)
def test_scheduler_vertical_fusion1(self):
realize = test_operators.realize
def fn(sa, ct, p):
# From torchbench.pyhpc_equation_of_state
v17 = -3.087032500374211e-7
v18 = -1.988366587925593e-8
v19 = -1.061519070296458e-11
v20 = 1.550932729220080e-10
t15 = realize(v19 * ct)
t19 = realize(v17 + ct * (v18 + t15) + v20 * sa)
t20 = realize(1.0 / t19)
t128 = realize(t19 * p)
return t20 + t128
self.common(
fn,
(
torch.randn(204, 204, 26),
torch.randn(204, 204, 26),
torch.randn(26),
),
)
self.assertEqual(torch._inductor.metrics.ir_nodes_pre_fusion, 5)
self.assertEqual(
torch._inductor.metrics.generated_kernel_count,
1 if self.device == "cuda" else 3,
)
def test_index_propagation(self):
def flip(x):
i = torch.arange(x.size(0) - 1, -1, -1, device=x.device)
return x[i]
x = torch.randn(8, device=self.device)
flip_opt = torch._dynamo.optimize("inductor")(flip)
expect = flip(x)
actual = _run_and_assert_no_indirect_indexing(self, flip_opt, x)
self.assertEqual(expect, actual)
def test_index_propagation_floordiv(self):
def repeat_interleave(x, n):
# e.g. x=[1, 2, 3], n=2 => returns [1, 1, 2, 2, 3, 3]
i = torch.arange(x.shape[0] * n, device=x.device)
return x[i // n]
x = torch.randn(8, device=self.device)
repeat_interleave_opt = torch._dynamo.optimize("inductor")(repeat_interleave)
# this should be collapsed to direct indexing
actual = _run_and_assert_no_indirect_indexing(self, repeat_interleave_opt, x, 3)
expect = torch.repeat_interleave(x, 3)
self.assertEqual(expect, actual)
self.assertEqual(actual, repeat_interleave(x, 3))
def test_index_propagation_remainder(self):
def repeat(x, n):
# e.g. x=[1, 2, 3], n=2 => returns [1, 2, 3, 1, 2, 3]
i = torch.arange(x.shape[0] * n, device=x.device)
return x[i % x.shape[0]]
x = torch.randn(8, device=self.device)
repeat_opt = torch._dynamo.optimize("inductor")(repeat)
# this should be collapsed to direct indexing
actual = _run_and_assert_no_indirect_indexing(self, repeat_opt, x, 3)
expect = x.repeat(3)
self.assertEqual(expect, actual)
self.assertEqual(actual, repeat(x, 3))
def test_computed_buffer_inlining(self):
def flip(x):
idx = torch.arange(x.size(0) - 1, -1, -1, device=x.device)
return x[idx], idx
flip_opt = torch._dynamo.optimize("inductor")(flip)
x = torch.randn(8, device=self.device)
expect = flip(x)
actual = _run_and_assert_no_indirect_indexing(self, flip_opt, x)
self.assertEqual(expect, actual)
    def test_sum1(self):
        """Sum over the last dim of a pointwise result."""
        def fn(a, b):
            return ((a + b).sum(-1),)
        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_sum2(self):
        """Sum over multiple dims and over the last dim of the same value."""
        def fn(a, b):
            return ((a + b).sum([1, 2]), (a + b).sum(-1))
        self.common(fn, (torch.randn(8, 9, 3, 21), torch.randn(8, 9, 3, 21)))
    def test_sum3(self):
        """Mix of a pointwise output, its reduction, and an unrelated squeeze."""
        def fn(a, b):
            r1 = a + b
            r2 = r1.sum(-1)
            r3 = torch.squeeze(b) + 10
            return (r1, r2, r3)
        # Mismatched elements: 2 / 10 (20.0%)
        # Greatest absolute difference: 0.0029296875 at index (8,) (up to 1e-05 allowed)
        # Greatest relative difference: 0.0017482517482517483 at index (6,) (up to 0.001 allowed)
        self.common(fn, (torch.randn(10, 10), torch.randn(1, 10)), atol=1e-5, rtol=2e-3)
    def test_sum4(self):
        """Chained reductions where every intermediate is also returned."""
        def fn(a):
            b = a + 1
            c = b.sum(-1)
            d = c + 3
            e = d.sum(-1)
            f = e + 5
            return (f, e, d, c, b)
        self.common(fn, (torch.randn(1, 16, 8, 8),))
    def test_sum5(self):
        """Chained reductions (non-power-of-two dims) returning only the final value."""
        def fn(a):
            b = a + 1
            c = b.sum(-1)
            d = c + 3
            e = d.sum(-1)
            f = e + 5
            return (f,)
        self.common(fn, (torch.randn(1, 17, 8, 9),))
    def test_reduction1(self):
        """Full-tensor reductions on an input containing +/-inf."""
        def fn(a):
            return (a.sum(), a.max(), a.min(), a.argmax(), a.argmin())
        self.common(fn, (torch.tensor([float("-inf"), 0.0, float("inf")]),))
    @skip_if_x86_mac()
    def test_reduction2(self):
        """Reductions over an all-inf input (argmax excluded, see FIXME)."""
        def fn(a):
            # FIXME: a.argmax
            return (a.sum(), a.max(), a.min(), a.argmin())
        self.common(fn, (torch.full((4,), float("inf")),))
    @skip_if_x86_mac()
    def test_reduction3(self):
        """Reductions over an all -inf input (argmin excluded, see FIXME)."""
        def fn(a):
            # FIXME: a.argmin
            return (a.sum(), a.max(), a.min(), a.argmax())
        self.common(fn, (torch.full((4,), float("-inf")),))
    def test_reduction4(self):
        """argmax/argmin over the last dim, including an all-ones tie case."""
        if self.device == "cpu":
            raise unittest.SkipTest("Non-deterministic CPU results")
        def fn(a):
            return (a.argmax(-1), a.argmin(-1))
        inputs = (torch.ones(128), torch.ones(4, 4, 1))
        for i in inputs:
            self.common(fn, (i,))
    @config.patch(unroll_reductions_threshold=1)
    def test_reduction5(self):
        """Same as reduction2/3 but with reduction unrolling disabled via config."""
        if self.device == "cpu":
            raise unittest.SkipTest("Non-deterministic CPU results")
        def fn(a):
            return (a.sum(), a.max(), a.min(), a.argmax())
        self.common(fn, (torch.full((4,), float("-inf")),))
    def test_prod(self):
        """Product reduction per-dim and over the whole tensor."""
        def fn(a):
            return a.prod(0), a.prod(1), a.prod()
        self.common(fn, (torch.rand((10, 10)),))
        self.common(fn, (torch.rand((1, 2050)),))
    def test_unroll_small_reduction(self):
        """Exercise many reduction ops both with and without small-reduction unrolling."""
        def fn(x):
            val1, index1 = x.min(-1)
            val2, index2 = x.max(-1)
            return (
                val1,
                index1,
                val2,
                index2,
                x.sum(-1),
                (x > 1).any(-1),
                (x > 0).all(-1),
                x.argmin(-1),
                x.argmax(-1),
                x.amin(-1),
                x.amax(-1),
                x.aminmax(),
            )
        with config.patch(unroll_reductions_threshold=8):
            # small sized reductions will get unrolled
            self.common(fn, (torch.randn(8, 3),))
        torch._dynamo.reset()
        with config.patch(unroll_reductions_threshold=1):
            # make sure things also work if they aren't unrolled
            self.common(fn, (torch.randn(8, 3),))
    def test_multilayer_low_prec(self):
        """Multilayer (split) mean reduction over a large fp16 tensor."""
        # fp16 nyi for cpu
        if self.device == "cpu":
            raise unittest.SkipTest("requires CUDA")
        def fn(a):
            return torch.mean(a)
        self.common(fn, ((torch.rand((10, 3, 352, 352), dtype=torch.float16),)))
    def test_expanded_reduction(self):
        """Reduction over a product where one input is broadcast (expanded)."""
        if self.device == "cpu":
            raise unittest.SkipTest(
                "https://github.com/pytorch/torchdynamo/issues/1697"
            )
        def fn(x, y):
            z = x * y
            return z.sum((0, 1))
        self.common(fn, (torch.randn(2, 197, 256), torch.randn(2, 1, 256)))
    def test_min_max_reduction(self):
        """max/min/amax/amin across float, half, and (where supported) bfloat16."""
        def fn(a, b):
            return (
                (a + b).max(),
                (a + b).min(),
                torch.amax(a + 1, keepdim=True),
                torch.amin(b + 1, keepdim=True),
            )
        dtypes = [torch.float, torch.float16]
        # bfloat16 reductions need SM80+ on CUDA
        if not (self.device == "cuda" and not SM80OrLater):
            dtypes += [torch.bfloat16]
        for dtype in dtypes:
            self.common(fn, (torch.randn(8, 8).to(dtype), torch.randn(8, 8).to(dtype)))
    def test_min_max_reduction_nan(self):
        """max/min must propagate NaN like eager does."""
        def fn(a):
            return (torch.max(a), torch.min(a))
        t1 = torch.randn(32)
        t1[16] = float("nan")
        self.common(fn, (t1,))
    def test_fmin_fmax(self):
        """fmin/fmax NaN semantics (NaN ignored unless both inputs are NaN)."""
        def fn(a, b):
            return (
                torch.fmin(a, b),
                torch.fmax(a, b),
                torch.fmax(a + 1, torch.tensor(0.0)),
            )
        self.common(
            fn,
            (
                torch.tensor(
                    [-10.0, 10.0, float("nan"), float("nan"), float("nan"), 3, 4]
                ),
                torch.tensor(
                    [float("nan"), float("nan"), -10.0, 10.0, float("nan"), 4, 3]
                ),
            ),
        )
    def test_sum_int(self):
        """Sum over bool/uint8/int inputs (integer accumulation paths)."""
        def fn(x):
            return 2 * x.sum(-1) + x.sum()
        dtypes = torch.bool, torch.uint8, torch.int
        inps = [torch.randint(2, (64,), dtype=dtype) for dtype in dtypes]
        for i in inps:
            self.common(fn, (i,), check_lowp=False)
    def test_sum_dtype(self):
        """Sum with an explicit accumulation dtype (float64)."""
        def fn(x):
            return x * x.sum(-1, dtype=torch.double) + x.sum(dtype=torch.double)
        self.common(fn, (torch.ones(32, 32) * 70,))
    def test_clamp(self):
        """clamp with both bounds, min only, and max only."""
        def fn(a, b):
            return (a.clamp(-0.1, 0.1), b.clamp(0), torch.clamp(a + b, max=0))
        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_clamp_type_promotion(self):
        """clamp with tensor bounds of differing dtypes must type-promote."""
        def fn(a):
            b = torch.tensor(1.0, dtype=torch.double, device=self.device)
            c = torch.full((4,), 2, device=self.device)
            return a.clamp(min=b, max=c)
        self.common(fn, (torch.randint(4, (4,)),))
    def test_arange1(self):
        """arange with explicit dtype plus a start/end integer arange."""
        def fn(x):
            rng1 = torch.arange(8 * 8, dtype=torch.float32, device=x.device).view(8, 8)
            rng2 = torch.arange(10, 18, device=x.device)
            tmp = x * rng1
            return tmp, tmp + rng2
        self.common(fn, (torch.randn(8, 8),))
    def test_arange2(self):
        """Default (int64) arange broadcast against an integer tensor."""
        def fn(x):
            rng1 = torch.arange(8, device=x.device)
            return (x + rng1,)
        self.common(fn, (torch.randint(4, (8, 8)),), check_lowp=False)
    def test_arange3(self):
        """aten.arange.start_step overload with a non-unit step."""
        def fn(x):
            return x + torch.ops.aten.arange.start_step(
                0, 53, 4, dtype=torch.int64, device=x.device
            )
        self.common(fn, (torch.randn(14),))
    def test_arange4(self):
        """Descending float arange (negative step)."""
        def fn(x):
            return x - torch.arange(512, -512, -1.0, device=x.device)
        self.common(fn, (torch.randn(1024),))
def test_arange5(self):
def fn(step, device):
return torch.arange(512, -512, step, device=device)
compiled_fn = torch._dynamo.optimize()(fn)
# NOTE: use assertEqual to check dtypes which self.common doesn't do
for step in (-1, -1.0):
expect = fn(step, self.device)
actual = compiled_fn(step, self.device)
self.assertEqual(expect, actual)
self.assertEqual(expect, actual)
    def test_arange6(self):
        """arange with float start/end but an int dtype taken from the input."""
        def fn(x):
            return torch.arange(0.1, 8.0001, 1, dtype=x.dtype, device=x.device)
        # Test that float arguments are truncated to int when dtype is set explicitly
        make_arg = functools.partial(make_tensor, device="cpu", requires_grad=False)
        self.common(fn, (make_arg(1, dtype=torch.float32),))
        self.common(fn, (make_arg(1, dtype=torch.int64),))
    def test_linspace1(self):
        """linspace with several steps, fused with a pointwise add."""
        def fn(x):
            return torch.linspace(0.125, 0.875, 7, device=x.device) + x
        self.common(fn, (torch.randn(1, 7),))
    def test_linspace2(self):
        """Single-step linspace (degenerate: returns just the start value)."""
        def fn(x):
            return torch.linspace(0, 2, 1, device=x.device) + x
        self.common(fn, (torch.randn(1, 1),))
    def test_linspace3(self):
        """Zero-step linspace (empty output)."""
        def fn(x):
            return torch.linspace(0, 2, 0, device=x.device)
        self.common(fn, (torch.Tensor([]),))
    def test_tensor1(self):
        """torch.tensor from a 1-element list and from a bare scalar."""
        def fn(x):
            return torch.tensor([1], device=x.device) + x, torch.tensor(
                5, device=x.device
            )
        self.common(fn, (torch.randn(10),))
    def test_tensor2(self):
        """torch.tensor from a longer Python list literal."""
        def fn(x):
            return torch.tensor(list(range(2, 40, 2)), device=x.device) + x
        self.common(fn, (torch.randn(1),))
    def test_tensor3(self):
        """torch.tensor from empty and small lists of varying length."""
        def fn(x):
            return (
                torch.tensor([], device=x.device),
                torch.tensor([1, 2], device=x.device) + 1,
                torch.tensor([1, 2, 3], device=x.device) + 2,
                torch.tensor([1, 2, 3, 4], device=x.device) + x,
            )
        self.common(fn, [torch.randn(4)])
    def test_views1(self):
        """view() across many compatible shape pairs, in both directions.

        Note the inner fns close over the loop variable ``size2``.
        """
        def fn1(x, y):
            return (x.view(size2) + y,)
        def fn2(x, y):
            return ((x + 1).view(size2) + y,)
        views = [
            ([5 * 7], [5, 7]),
            ([2 * 3 * 4 * 5 * 6 * 7], [2, 3, 4, 5, 6, 7]),
            ([2 * 3, 4, 5, 6 * 7], [2, 3, 4, 5, 6, 7]),
            ([10 * 5, 20], [10, 5, 20]),
            ([1, 10, 1], [10]),
            ([10, 1, 10, 1, 10], [10, 100]),
            ([2, 2, 2, 2], [4, 4]),
        ]
        for size1, size2 in views:
            self.common(fn1, (torch.randn(size1), torch.randn(size2)))
            self.common(fn2, (torch.randn(size1), torch.randn(size2)))
        for size2, size1 in views:
            self.common(fn1, (torch.randn(size1), torch.randn(size2)))
            self.common(fn2, (torch.randn(size1), torch.randn(size2)))
    def test_views2(self):
        """view() with an inferred (-1) dimension."""
        def fn1(x):
            return (x.view(size2) + 1,)
        def fn2(x):
            return ((x * 2).view(size2) + 1,)
        for size1, size2 in [
            ([2, 2, 2, 2], [4, -1]),
            ([10, 1, 10, 1, 10], [-1, 100]),
            ([10 * 5, 20], [10, -1, 20]),
        ]:
            self.common(fn1, (torch.randn(size1),))
            self.common(fn2, (torch.randn(size1),))
    def test_views3(self):
        """Chained aten.view after an index gather."""
        # example taken from hf_BigBird
        def forward(arg1, arg2):
            index = torch.ops.aten.index(arg1, [arg2])
            view_1 = torch.ops.aten.view(index, [1, 2232, 64])
            view_2 = torch.ops.aten.view(view_1, [1, 12, 62, 192])
            return view_2
        self.common(
            forward,
            (
                rand_strided((64, 64), (64, 1), torch.float32),
                rand_strided((2232,), (1,), torch.int64),
            ),
        )
    def test_views4(self):
        """Chained aten.view (with -1) after index_select."""
        # example taken from hf_BigBird
        def forward(arg1, arg2):
            arg1 = arg1.index_select(0, arg2)
            arg1 = torch.ops.aten.view(arg1, [2, 3, 4, 5, 5])
            arg1 = torch.ops.aten.view(arg1, [2, 3, 2, 10, -1])
            return arg1
        self.common(
            forward,
            (
                torch.randn(12, 5, 5),
                torch.randint(0, 11, (24,)),
            ),
        )
    def test_views5(self):
        """view of a slice that produces a zero-sized dimension."""
        # tensor with shape 0 in any dimension
        def forward(x):
            y = x[:, 4:]
            return y.view(len(y), -1, 4)
        self.common(
            forward,
            (torch.randn(4, 4, 4, 4),),
        )
    def test_views6(self):
        """view after slicing one dim down to size 0."""
        def forward(x):
            x = torch.ops.aten.relu(x)
            s = torch.ops.aten.slice(x, 0, 0, 9223372036854775807)
            s = torch.ops.aten.slice(s, 1, 0, 9223372036854775807)
            s = torch.ops.aten.slice(s, 3, 0, 0)
            y = torch.ops.aten.view(s, [4, 2, -1])
            return y
        self.common(
            forward,
            (torch.randn(4, 2, 4, 4),),
        )
    def test_relu(self):
        """relu, alone and fused with add/div."""
        def fn(a, b):
            return (torch.relu(a), torch.relu(a + b) / 10)
        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_exp(self):
        """exp, alone and fused with add."""
        def fn(a, b):
            return (torch.exp(a), torch.exp(a + b))
        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_exp2(self):
        """exp2 and pow(2, ...) forms."""
        def fn(a, b):
            return (torch.exp2(a), torch.exp2(a + b), torch.pow(2, -torch.abs(a - b)))
        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_sigmoid(self):
        """sigmoid, alone and fused with add."""
        def fn(a, b):
            return (torch.sigmoid(a), torch.sigmoid(a + b))
        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_round(self):
        """round, including the decimals= variant."""
        def fn(a, b):
            return torch.round(a), torch.round(b + 1), torch.round(a, decimals=2)
        # without manual_seed, there is some chance this test fails due to:
        # https://github.com/openai/triton/issues/530
        torch.manual_seed(0)
        # with *100 we are always getting a number exactly at .5 which we don't do right in half
        self.common(fn, (torch.randn(8, 8) * 100, torch.randn(8, 8) * 10))
    def test_round_correctness(self):
        """round-half-to-even correctness on a dense float64 grid."""
        if self.device == "cuda":
            raise unittest.SkipTest("need to debug tl.libdevice on A100/V100")
        def fn(a):
            return torch.round(a)
        self.common(
            fn,
            [torch.arange(-10, 10, 0.1, dtype=torch.float64)],
            check_lowp=False,
        )
    def test_silu(self):
        """silu activation."""
        def fn(a):
            return (torch.nn.functional.silu(a),)
        self.common(fn, (torch.randn(8, 8),))
    # TODO(voz): Re-enable this test ASAP https://github.com/pytorch/pytorch/issues/82763
    @unittest.skip("Skipping due to op bugs")
    def test_nan_to_num(self):
        """nan_to_num across all combinations of nan/posinf/neginf replacements."""
        def fn(a):
            return (
                torch.nan_to_num(a),
                torch.nan_to_num(a, nan=3.0),
                torch.nan_to_num(a, nan=None),
                torch.nan_to_num(a, posinf=4.0),
                torch.nan_to_num(a, neginf=5.0),
                torch.nan_to_num(a, nan=3.0, posinf=4.0, neginf=5.0),
            )
        self.common(
            fn,
            (torch.tensor((float("nan"), float("inf"), float("-inf"), 1.0)),),
            check_lowp=False,  # a much more elaborate test is required to match finfo max's for float and half
        )
    def test_div1(self):
        """All div rounding modes: float / float."""
        def fn(a, b):
            return (
                aten.div(a, b, rounding_mode=None),
                aten.div(a, b, rounding_mode="floor"),
                aten.div(a, b, rounding_mode="trunc"),
                a / b,
                a // b,
            )
        self.common(fn, (torch.randn(8, 8) * 100, torch.randn(8, 8) * 100))
    def test_div2(self):
        """All div rounding modes: int / float."""
        def fn(a, b):
            return (
                aten.div(a, b, rounding_mode=None),
                aten.div(a, b, rounding_mode="floor"),
                aten.div(a, b, rounding_mode="trunc"),
                a / b,
                a // b,
            )
        self.common(fn, (torch.randint(-100, 100, [8, 8]), 100 * torch.randn(8, 8)))
    def test_div3(self):
        """All div rounding modes: exactly-divisible positive ints."""
        def fn(a, b):
            return (
                aten.div(a, b, rounding_mode=None),
                aten.div(a, b, rounding_mode="floor"),
                aten.div(a, b, rounding_mode="trunc"),
                a / b,
                a // b,
            )
        a = torch.randint(1, 100, [8, 8])
        self.common(fn, (a * 2, a))
    def test_div4(self):
        """All div rounding modes: negative int / positive int (floor vs trunc differ)."""
        def fn(a, b):
            return (
                aten.div(a, b, rounding_mode=None),
                aten.div(a, b, rounding_mode="floor"),
                aten.div(a, b, rounding_mode="trunc"),
                a / b,
                a // b,
            )
        self.common(
            fn,
            (torch.randint(-100, 0, [8, 8]), torch.randint(1, 10, [8, 8])),
        )
    def test_div5(self):
        """All div rounding modes: tensor / Python int scalar."""
        def fn(a, b):
            return (
                aten.div(a, b, rounding_mode=None),
                aten.div(a, b, rounding_mode="floor"),
                aten.div(a, b, rounding_mode="trunc"),
                a / b,
                a // b,
            )
        # divide a scalar
        self.common(fn, (torch.randint(-100, 0, [8, 8]), 16))
    def test_div6(self):
        """All div rounding modes: bool numerator (promoted to integer)."""
        def fn(a, b):
            return (
                aten.div(a, b, rounding_mode=None),
                aten.div(a, b, rounding_mode="floor"),
                aten.div(a, b, rounding_mode="trunc"),
                a / b,
                a // b,
            )
        # treat boolean as integer
        self.common(
            fn,
            (torch.ones([8, 8], dtype=torch.bool), torch.randint(-100, -1, [8, 8])),
        )
    def test_div7(self):
        """All div rounding modes: values beyond 32-bit range (64-bit arithmetic)."""
        def fn(a, b):
            return (
                aten.div(a, b, rounding_mode=None),
                aten.div(a, b, rounding_mode="floor"),
                aten.div(a, b, rounding_mode="trunc"),
                a / b,
                a // b,
            )
        self.common(
            fn,
            (
                torch.randint(2**32, 2**40, [100, 100]),
                torch.randint(-10, -1, [100, 100]),
            ),
        )
    def test_div8(self):
        """All div rounding modes: both operands are Python int scalars."""
        def fn(a, b):
            return (
                aten.div(a, b, rounding_mode=None),
                aten.div(a, b, rounding_mode="floor"),
                aten.div(a, b, rounding_mode="trunc"),
                a / b,
                a // b,
            )
        self.common(fn, (1024, 100))
    def test_div_zero_dim(self):
        """All div rounding modes with a 0-d tensor on either side, float and int."""
        def fn(a, b):
            return (
                aten.div(a, b, rounding_mode=None),
                aten.div(a, b, rounding_mode="floor"),
                aten.div(a, b, rounding_mode="trunc"),
                a / b,
                a // b,
            )
        for dtype in (torch.float32, torch.int64):
            self.common(
                fn,
                (
                    make_tensor(10, device="cpu", dtype=dtype),
                    # 0-d denominator; exclude_zero avoids div-by-zero
                    make_tensor((), device="cpu", dtype=dtype, exclude_zero=True),
                ),
            )
            self.common(
                fn,
                (
                    make_tensor((), device="cpu", dtype=dtype),
                    make_tensor(10, device="cpu", dtype=dtype, exclude_zero=True),
                ),
            )
    def test_div_prim(self):
        """prims.div (true division for floats, trunc-like for ints)."""
        def fn(a, b):
            return (torch.ops.prims.div(a, b),)
        for dtype in (torch.float32, torch.int64):
            self.common(
                fn,
                (
                    make_tensor(100, device="cpu", dtype=dtype),
                    make_tensor(100, device="cpu", dtype=dtype, exclude_zero=True),
                ),
            )
    def test_both_scalars(self):
        """Binary aten ops where both operands are Python scalars (int + float)."""
        def fn(a, b):
            return (
                aten.add(a, b),
                aten.add(b, a),
                aten.sub(a, b),
                aten.sub(b, a),
                aten.mul(a, b),
                aten.mul(b, a),
            )
        self.common(fn, (4, 3.3), reference_in_float=False)
    def test_sum_keepdims(self):
        """Sum with keepdim=True fused with a pointwise add."""
        def fn(a, b):
            return (torch.sum(a + b, -1, keepdim=True),)
        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_large_tensor_reduction(self):
        """Reduction over >2**32 elements must use 64-bit indexing."""
        if not _has_sufficient_memory(self.device, 4.5 * 1024**3):  # 4.5 GiB
            raise unittest.SkipTest("insufficient memory")
        if self.device == "cpu":
            raise unittest.SkipTest("Fails on CPU")
        # Test 64-bit indexing works correctly
        def fn(a):
            return torch.max(a)
        t = torch.ones(2**32, dtype=torch.int8, device=self.device)
        # plant a single distinguishable value at the very end so a truncated
        # (32-bit) index scan would miss it
        t[-1] = 2
        # self.common OOMs here because it copies inputs to check for mutations
        compiled_fn = torch._dynamo.optimize()(fn)
        actual = compiled_fn(t)
        expect = torch.tensor(2, dtype=torch.int8, device=self.device)
        self.assertEqual(actual, expect)
    def test_large_broadcast_reduction(self):
        """Small inputs whose broadcasted intermediate needs 64-bit indexing."""
        if self.device == "cpu":
            raise unittest.SkipTest("Fails on CPU")
        # Test 64-bit indexing works correctly when inputs are less than 32-bit
        # but intermediate tensors require 64-bit indexing
        def fn(a, b):
            return torch.max(a + b)
        t1 = torch.ones(1, 2**16, dtype=torch.int8, device=self.device)
        t2 = torch.ones(2**16, 1, dtype=torch.int8, device=self.device)
        t1[-1, -1] = 2
        t2[-1, -1] = 2
        # self.common OOMs here because it copies inputs to check for mutations
        compiled_fn = torch._dynamo.optimize()(fn)
        actual = compiled_fn(t1, t2)
        expect = torch.tensor(4, dtype=torch.int8, device=self.device)
        self.assertEqual(actual, expect)
    def test_large_pointwise(self):
        """Pointwise op over >INT_MAX elements (64-bit indexing)."""
        if not _has_sufficient_memory(self.device, 2 * (2**31 + 1)):
            raise unittest.SkipTest("insufficient memory")
        def fn(a):
            return a + 1
        t = torch.ones(2**31 + 1, dtype=torch.int8, device=self.device)
        compiled_fn = torch._dynamo.optimize()(fn)
        actual = compiled_fn(t)
        # Can't use assertEqual as it expands broadcasted inputs
        del t
        if torch.device(self.device).type == "cuda":
            torch.cuda.empty_cache()
        self.assertTrue((actual == 2).all())
    def test_large_offset_pointwise(self):
        # Test 64-bit indexing is used when input views a tensor that can be
        # indexed with 32-bit strides but the storage offset pushes it over
        # INT_MAX
        if not _has_sufficient_memory(self.device, (2**31 + 1) + (2**30 + 1)):
            raise unittest.SkipTest("insufficient memory")
        def fn(a):
            return a + 4
        t = torch.ones(2**31 + 1, dtype=torch.int8, device=self.device)
        t[2**30 :] = 0
        compiled_fn = torch._dynamo.optimize()(fn)
        # view with a storage offset past INT_MAX bytes from the base
        actual = compiled_fn(t[2**30 :])
        self.assertTrue((actual == 4).all())
    def test_large_strided_reduction(self):
        # Test 64-bit indexing is used when input numel is less than INT_MAX
        # but stride calculations go above INT_MAX
        if not _has_sufficient_memory(self.device, 2**31 + 2):
            raise unittest.SkipTest("insufficient memory")
        def fn(a):
            return torch.max(a)
        storage = torch.ones(2**31 + 1, dtype=torch.int8, device=self.device)
        view = storage[::32]
        view[-1] = 2
        compiled_fn = torch._dynamo.optimize()(fn)
        actual = compiled_fn(view)
        expect = torch.tensor(2, dtype=torch.int8, device=self.device)
        self.assertEqual(actual, expect)
    def test_softmax(self):
        """softmax over different dims, including a fused add input."""
        def fn(a, b):
            return (torch.softmax(a + b, -1), torch.softmax(a, 0), torch.softmax(b, 1))
        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_log_softmax(self):
        """log_softmax over different dims, including a fused add input."""
        def fn(a, b):
            return (F.log_softmax(a + b, -1), F.log_softmax(a, 0), F.log_softmax(b, 1))
        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_transpose(self):
        """t() and transpose() fused with pointwise ops."""
        def fn(a, b):
            return (
                torch.t(a) + b,
                torch.transpose(b * 2, 0, 1) + 10,
            )
        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_permute1(self):
        """5-d permute fused with pointwise ops."""
        def fn(a):
            return (
                torch.permute(a + 1, [2, 1, 4, 0, 3]) + 2,
                torch.permute(a, [2, 1, 4, 0, 3]) + 2,
            )
        self.common(fn, (torch.randn(2, 2, 2, 2, 2),))
    def test_permute2(self):
        """permute (with a negative dim) of an unfolded, unsqueezed tensor."""
        def fn(a):
            a = a.unfold(0, 2, 1)
            a = torch.unsqueeze(a, 1)
            a = torch.permute(a, [0, 2, 3, -3])
            return (a,)
        self.common(fn, (torch.randn(4, 4),))
    def test_expand(self):
        """expand with explicit sizes and with -1 placeholders."""
        def fn(a):
            return (
                (a + 1).expand(3, 4, 2, 3, 2) + 2,
                a.expand(2, 1, 2, 3, 2) + 2,
            ), a.expand(2, -1, 5, -1)
        self.common(fn, (torch.randn(2, 1, 2),))
    def test_squeeze1(self):
        """squeeze() of all size-1 dims."""
        def fn(a):
            return ((a + 1).squeeze() + 2, a.squeeze() + 2)
        self.common(fn, (torch.randn(1, 2, 1, 2, 2, 1, 1),))
    def test_squeeze2(self):
        """squeeze of specific dims, chained and by index."""
        def fn(a):
            return ((a + 1).squeeze(-1).squeeze(2) + 2, a.squeeze(0) + 2)
        self.common(fn, (torch.randn(1, 2, 1, 2, 2, 2, 1),))
    def test_simplify_loops(self):
        """Add of a contiguous tensor and a permuted view (loop simplification)."""
        def fn(a, b):
            return a + b
        self.common(
            fn,
            (
                torch.randn(2, 3, 4, 5, 6),
                torch.randn(4, 2, 3, 5, 6).permute(1, 2, 0, 3, 4),
            ),
        )
    def test_unsqueeze(self):
        """unsqueeze at positive and negative dims, fused with pointwise ops."""
        def fn(a):
            return (
                torch.unsqueeze(a + 1, -1) + 2,
                torch.unsqueeze(a, 2) + 2,
                torch.unsqueeze(a + 1, 0) + 2,
                torch.unsqueeze(a, -2) + 2,
            )
        self.common(
            fn,
            (
                torch.randn(
                    2,
                    2,
                    2,
                    2,
                ),
            ),
        )
    def test_unsqueeze_inplace(self):
        """In-place aten.unsqueeze_ on both an intermediate and an input-derived value."""
        def fn(a):
            tmp1 = a + 1
            aten.unsqueeze_(tmp1, 2)
            tmp2 = aten.unsqueeze_(a + 1, 0) + 2
            return (tmp1, tmp2)
        self.common(
            fn,
            (
                torch.randn(
                    2,
                    2,
                    2,
                    2,
                ),
            ),
        )
    def test_addmm(self):
        """addmm with pointwise ops fused on all three inputs and the output."""
        def fn(a, b, c):
            return (torch.addmm(a + 1, b + 2, c + 3) + 4,)
        self.common(
            fn,
            (
                torch.randn(8, 8),
                torch.randn(8, 8),
                torch.randn(8, 8),
            ),
        )
    # https://github.com/pytorch/pytorch/issues/98979
    @unittest.skipIf(HAS_CUDA, "cuda failed for float64 linear")
    def test_linear_float64(self):
        """Linear layer in float64 (inference only)."""
        mod = torch.nn.Sequential(torch.nn.Linear(8, 16).to(torch.float64)).eval()
        with torch.no_grad():
            self.common(mod, (torch.randn(2, 8).to(torch.float64),))
    def test_linear1(self):
        """Linear + Sigmoid module."""
        mod = torch.nn.Sequential(
            torch.nn.Linear(8, 16),
            torch.nn.Sigmoid(),
            ToTuple(),
        )
        self.common(mod, (torch.randn(2, 8),))
    def test_linear2(self):
        """Stack of four Linear+ReLU pairs."""
        mod = torch.nn.Sequential(
            torch.nn.Linear(8, 8),
            torch.nn.ReLU(),
            torch.nn.Linear(8, 8),
            torch.nn.ReLU(),
            torch.nn.Linear(8, 8),
            torch.nn.ReLU(),
            torch.nn.Linear(8, 8),
            torch.nn.ReLU(),
        )
        self.common(mod, (torch.randn(2, 8),))
    def test_bmm1(self):
        """Batched matmul, plain and fused with pointwise ops."""
        def fn(a, b):
            return (
                torch.bmm(a, b),
                torch.bmm(a + 1, b + 2) + 3,
            )
        self.common(
            fn,
            (
                torch.randn(2, 8, 8),
                torch.randn(2, 8, 8),
            ),
            check_lowp=False,
        )
        self.common(
            fn,
            (
                torch.randn(1, 16, 8),
                torch.randn(1, 8, 10),
            ),
            check_lowp=False,
        )
    def test_bmm2(self):
        """bmm with a permuted (transposed) first operand."""
        def fn(a, b):
            return torch.bmm(a.permute(0, 2, 1), b)
        self.common(
            fn,
            (
                torch.randn(1, 8, 8),
                torch.randn(1, 8, 8),
            ),
            check_lowp=False,
        )
    def test_scalar_input(self):
        """Floor-div of a tensor by a Python int scalar."""
        def fn(x, y):
            a = torch.div(x, y, rounding_mode="floor")
            return a
        self.common(fn, [torch.randint(5, (1, 8)), 5400])
    def test_shape_prop_torch_ones(self):
        """Shape propagation through a torch.ones created inside forward."""
        class Model(torch.nn.Module):
            def forward(self, attention_scores):
                # broadcast-add a freshly constructed mask (8, 1, 1, 512)
                extended_attention_mask = torch.ones(
                    8, 1, 1, 512, device=attention_scores.device
                )
                attention_scores = attention_scores + extended_attention_mask
                return attention_scores
        mod = Model().eval()
        with torch.no_grad():
            self.common(
                mod,
                (torch.randn(8, 12, 512, 512),),
            )
    @slowTest
    def test_conv_bn_fuse(self):
        """Conv{1,2,3}d + BatchNorm fusion in eval mode across many configs.

        Sweeps dim, bias, kernel size, dilation, and groups; on CPU also
        checks channels_last memory formats for 2d/3d.
        """
        # For gpu path, there is an accuracy issue
        if self.device == "cuda":
            raise unittest.SkipTest("only support cpu conv bn test")
        input_shapes = {1: (112,), 2: (112, 112), 3: (55, 55, 55)}
        conv_modules = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
        bn_modules = {
            1: torch.nn.BatchNorm1d,
            2: torch.nn.BatchNorm2d,
            3: torch.nn.BatchNorm3d,
        }
        # (dim, bias, kernel_size, dilation, groups)
        options = itertools.product(
            [1, 2, 3],
            [True, False],
            [1, 3],
            [1, 2],
            [1, 4],
        )
        for (
            dim,
            bias,
            kernel_size,
            dilation,
            groups,
        ) in options:
            oC = 32 * groups
            iC = 3 * groups
            x_shape = (1, iC) + input_shapes[dim]
            mod = torch.nn.Sequential(
                conv_modules[dim](
                    iC,
                    oC,
                    kernel_size=kernel_size,
                    dilation=dilation,
                    groups=groups,
                    bias=bias,
                ),
                bn_modules[dim](oC),
            ).eval()
            test_memory_format = [torch.contiguous_format]
            # TODO: GPU path doesn't support channels_last now.
            if not HAS_CUDA and dim > 1:
                channels_last = (
                    torch.channels_last if dim == 2 else torch.channels_last_3d
                )
                test_memory_format.append(channels_last)
            for memory_format in test_memory_format:
                v = torch.randn(x_shape, dtype=torch.float32).to(
                    memory_format=memory_format
                )
                with torch.no_grad():
                    self.common(
                        mod,
                        (v,),
                    )
    def test_conv_functional_bn_fuse(self):
        """Conv2d followed by a BatchNorm implemented via F.batch_norm.

        The custom BatchNorm reproduces torch.nn.BatchNorm2d.forward using the
        functional API, to check that conv-bn fusion also handles the
        functional form.
        """
        # For gpu path, there is an accuracy issue
        if self.device == "cuda":
            raise unittest.SkipTest("only support cpu conv bn test")
        # Define a BatchNorm using functional BN.
        class BatchNorm(torch.nn.BatchNorm2d):
            def __init__(
                self,
                num_features,
                eps=1e-5,
                momentum=0.1,
                affine=True,
                track_running_stats=True,
                device=None,
                dtype=None,
            ):
                factory_kwargs = {"device": device, "dtype": dtype}
                super().__init__(
                    num_features,
                    eps=eps,
                    momentum=momentum,
                    affine=affine,
                    track_running_stats=track_running_stats,
                    **factory_kwargs,
                )
            def forward(self, x):
                # Mirrors torch.nn.BatchNorm2d.forward but calls F.batch_norm
                # directly.
                if self.momentum is None:
                    exponential_average_factor = 0.0
                else:
                    exponential_average_factor = self.momentum
                if self.training and self.track_running_stats:
                    # TODO: if statement only here to tell the jit to skip emitting this when it is None
                    if self.num_batches_tracked is not None:  # type: ignore[has-type]
                        self.num_batches_tracked = self.num_batches_tracked + 1  # type: ignore[has-type]
                        if self.momentum is None:  # use cumulative moving average
                            exponential_average_factor = 1.0 / float(
                                self.num_batches_tracked
                            )
                        else:  # use exponential moving average
                            exponential_average_factor = self.momentum
                if self.training:
                    bn_training = True
                else:
                    bn_training = (self.running_mean is None) and (
                        self.running_var is None
                    )
                x = F.batch_norm(
                    x,
                    # If buffers are not to be tracked, ensure that they won't be updated
                    self.running_mean
                    if not self.training or self.track_running_stats
                    else None,
                    self.running_var
                    if not self.training or self.track_running_stats
                    else None,
                    self.weight,
                    self.bias,
                    bn_training,
                    exponential_average_factor,
                    self.eps,
                )
                return x
        v = torch.randn(1, 3, 556, 56, dtype=torch.float32)
        mod = torch.nn.Sequential(
            torch.nn.Conv2d(
                3,
                64,
                kernel_size=3,
                dilation=1,
                groups=1,
                bias=True,
            ),
            BatchNorm(64),
        ).eval()
        with torch.no_grad():
            self.common(
                mod,
                (v,),
            )
    def test_upsample_cat_conv(self):
        """UpsamplingNearest2d output concatenated with a second input, then conv."""
        if self.device == "cuda":
            raise unittest.SkipTest("only support cpu upsample_cat_conv test")
        class M(torch.nn.Module):
            def __init__(
                self,
                **kwargs,
            ):
                super().__init__()
                self.upsample = torch.nn.UpsamplingNearest2d(scale_factor=2)
                self.conv = torch.nn.Conv2d(
                    8,
                    5,
                    kernel_size=1,
                    padding=0,
                    stride=1,
                    dilation=1,
                    **kwargs,
                )
            def forward(self, x, y):
                x = self.upsample(x)
                # channel-concat: 2 upsampled + 6 passthrough = 8 conv inputs
                z = torch.cat([x, y], dim=1)
                z = self.conv(z)
                return z
        v1 = torch.randn([8, 2, 12, 26])
        v2 = torch.randn([8, 6, 24, 52])
        with torch.no_grad():
            self.common(
                M().eval(),
                (v1, v2),
            )
    def test_aliased_buffer_reuse(self):
        """Buffer reuse must not clobber an input that aliases a cat output slice."""
        def fn(x, y):
            x = 2 * x
            y = 2 * y
            c = torch.cat([x, y], dim=-1)
            d = 1 + c
            m = torch.mm(d, d)
            # m[:, :2] overlaps the region that came from x
            return m[:, :2] + x
        self.common(fn, (torch.randn(4, 2), torch.randn(4, 2)), check_lowp=False)
    def test_view_detach(self):
        """detach() of a view into a tensor that requires grad."""
        def fn(a):
            return a[0].detach()
        self.common(
            fn,
            (torch.randn([4, 4], requires_grad=True),),
        )
    def test_gather1(self):
        """gather on an expanded (broadcast) source, positive and negative dim."""
        def fn(a, b):
            return (
                torch.gather(a.expand([4, 5, 10, 6]), 3, b + 1),
                torch.gather(a.expand([4, 5, 10, 6]), -1, b + 1),
            )
        self.common(
            fn,
            (
                torch.randn([1, 1, 10, 6]),
                torch.randint(5, [4, 5, 10, 1], dtype=torch.int64),
            ),
        )
    def test_gather2(self):
        """gather on 0-d tensors (eager-only sanity check, no compilation)."""
        # 0d tensor
        def fn(a, b):
            return torch.gather(a, 0, b) + torch.gather(a, -1, b)
        x = torch.tensor(123)
        y = torch.tensor(0)
        self.assertEqual(fn(x, y), x + x)
    def test_gather3(self):
        """gather with sparse_grad=True on an input requiring grad."""
        def fn(a, b):
            return torch.gather(a, 1, b, sparse_grad=True)
        self.common(
            fn,
            (
                torch.randn([4, 5, 10, 6], requires_grad=True),
                torch.randint(5, [4, 5, 10, 1], dtype=torch.int64),
            ),
        )
    def test_slice1(self):
        """Basic slicing, including out-of-range negative indices (clamped)."""
        def fn(a):
            return (
                a[:, :10, 0] + a[:, 10:, 0],
                (a + 1)[:, :10, 0] + (a + 1)[:, 10:, 0],
                a[:, -30:, 0],  # negative index out of range
                a[:, :-30, 0],  # negative index out of range
            )
        self.common(
            fn,
            (torch.randn([2, 20, 2]),),
        )
    def test_slice2(self):
        """Strided slices with negative starts/stops and step 2."""
        def fn(a):
            return (
                a[:-1, ::2, -1] + a[-1:, 1::2, -2],
                (a + 1)[:-1, ::2, -1] + (a + 2)[-1:, 1::2, -2],
            )
        self.common(
            fn,
            (torch.randn([2, 20, 2]),),
        )
    def test_split_with_sizes(self):
        """split with explicit size lists along the last dim."""
        def fn(a, sizes):
            return [t + 1.0 for t in torch.split(a * 2.0, sizes, -1)]
        self.common(fn, (torch.randn(2, 2, 10), [3, 3, 4]))
        self.common(fn, (torch.randn(2, 2, 10), [4, 3, 3]))
        self.common(fn, (torch.randn(2, 2, 10), [1, 2, 3, 4]))
    def test_split_with_sizes_failed(self):
        """split sizes that don't sum to the dim length must raise at runtime."""
        @torch._dynamo.optimize("inductor")
        def fn(a):
            return torch.split(a, [2, 1, 1], dim=1)
        with self.assertRaisesRegex(RuntimeError, ""):
            fn(torch.randn(1, 5))
    def test_inductor_assert(self):
        """Python asserts on dynamic shapes should compile and hold."""
        @torch._dynamo.optimize("inductor", dynamic=True)
        def fn(a):
            assert a.shape[0] >= 2 and a.shape[1] >= 4
            return a.cos()
        inp = torch.randn(2, 4, 6)
        torch._dynamo.mark_dynamic(inp, 0)
        torch._dynamo.mark_dynamic(inp, 1)
        self.assertEqual(fn(inp), inp.cos())
    def test_split(self):
        """split with a fixed chunk size, on an input and a fused intermediate."""
        def fn(a):
            t = torch.split(a, 3, -1)
            return (t[0], t[1], t[2], t[3])
        def fn2(a):
            return fn(a + 1)
        self.common(
            fn,
            (torch.randn([2, 2, 10]),),
        )
        self.common(
            fn2,
            (torch.randn([2, 2, 10]),),
        )
    def test_to_dtype(self):
        """_to_copy / to dtype conversions (dtype=6 is torch.float32)."""
        def fn(a, b):
            return (
                aten._to_copy(a, dtype=6),
                aten._to_copy(b + 1, dtype=6),
                aten.to(b, torch.float64),
                aten.to(b, torch.bool),
            )
        self.common(
            fn,
            (
                torch.randn([2, 2, 10]),
                torch.randn([2, 2, 10], dtype=torch.float64),
            ),
        )
    @requires_cuda()
    def test_to_device(self):
        """_to_copy with a device change (cpu<->cuda), whichever way applies."""
        def fn(a):
            if a.device.type == "cpu":
                return aten._to_copy(a, device=torch.device("cuda"), dtype=6, layout=0)
            else:
                return aten._to_copy(a, device=torch.device("cpu"), dtype=6, layout=0)
        self.common(
            fn,
            (torch.randn([2, 2, 10]),),
        )
    def test_to_memory_format(self):
        """to(memory_format=...) in both directions (contiguous <-> channels_last)."""
        def fn(a, memory_format):
            return a.to(memory_format=memory_format)
        self.common(
            fn,
            (torch.randn([2, 2, 10, 10]), torch.channels_last),
        )
        self.common(
            fn,
            (
                torch.randn([2, 2, 10, 10]).to(memory_format=torch.channels_last),
                torch.contiguous_format,
            ),
        )
    @requires_cuda()
    def test_to_device_constant(self):
        """Constant tensors created on the opposite device and moved across."""
        def fn(a):
            d1 = a.device.type
            if d1 == "cpu":
                d2 = "cuda"
            else:
                d2 = "cpu"
            const1 = torch.as_tensor(list(range(64)), device=d2)
            return (
                torch.arange(10, device=d2).to(d1) + a,
                const1.to(d1),
                (const1 + 1).to(d1),
            )
        self.common(
            fn,
            (torch.randn([10]),),
        )
    @requires_cuda()
    def test_multi_device(self):
        """A graph that repeatedly bounces between cpu and cuda."""
        def fn(x):
            x = x + 1
            x = x + 2
            x = x.cuda()
            x = x + 3
            x = x + 4
            x = x.cpu()
            x = x + 5
            x = x + 6
            x = x.cuda()
            x = x + 7
            x = x + 8
            x = x.cpu()
            x = x + 9
            x = x + 10
            return x
        self.common(
            fn,
            (torch.randn([2, 2, 10]),),
            check_lowp=False,  # cpu doesn't understand fp16, and there are explicit .cpu() calls
        )
    @requires_multigpu()
    def test_multi_gpu_device(self):
        """Graph moving a result from cuda:0's default stream to cuda:1."""
        # TODO: https://github.com/pytorch/pytorch/issues/92627
        x = torch.rand([4], device="cuda")
        def fn(x, y):
            r = torch.ops.aten.div(x, y)
            r = r.to("cuda:1")
            return 2 * r
        self.common(fn, (torch.randn(4), torch.randn(4)), check_lowp=False)
    @skipIfRocm
    @requires_multigpu()
    def test_multi_gpu_recompile_on_index(self):
        """Switching GPU index must trigger a guard failure and recompile."""
        torch.set_float32_matmul_precision("high")
        def gemm(x, y):
            return x @ y
        failed_guard = None
        def fail(guard):
            nonlocal failed_guard
            failed_guard = guard
        gemm_opt = torch._dynamo.optimize("inductor", guard_fail_fn=fail)(gemm)
        x0 = torch.randn(1024, 1024, device="cuda:0")
        y0 = torch.randn(1024, 1024, device="cuda:0")
        gemm_opt(x0, y0)
        # same shapes on a different device index -> device-index guard fires
        x1 = torch.randn(1024, 1024, device="cuda:1")
        y1 = torch.randn(1024, 1024, device="cuda:1")
        gemm_opt(x1, y1)
        self.assertTrue(failed_guard is not None)
        self.assertTrue(
            "tensor 'L['x']' Tensor device index mismatch. Expected device index to be"
            in failed_guard.reason
        )
    def test_unbind(self):
        """unbind along the first and last dims."""
        def fn(a):
            return torch.unbind(a), torch.unbind(a, -1)
        self.common(
            fn,
            (torch.randn([4, 4, 4]),),
        )
    @skipIfRocm
    def test_convolution1(self):
        """Conv2d + ReLU module with relaxed tolerances."""
        m = torch.nn.Sequential(
            torch.nn.Conv2d(5, 6, [3, 3]),
            torch.nn.ReLU(),
            ToTuple(),
        )
        self.common(
            m,
            (torch.randn([2, 5, 16, 16]),),
            # Mismatched elements: 10 / 2352 (0.4%)
            # Greatest absolute difference: 5.7220458984375e-05 at index (0, 3, 12, 12) (up to 1e-05 allowed)
            # Greatest relative difference: 0.06512477175897748 at index (0, 4, 11, 9) (up to 0.001 allowed)
            atol=6e-5,
            rtol=0.001,
        )
    def test_convolution2(self):
        """Transposed 1d convolution via aten.convolution."""
        def fn(x, w, b):
            # transposed conv
            return (aten.convolution(x, w, b, [4], [0], [1], True, [0], 1),)
        self.common(
            fn,
            (
                torch.randn([2, 32, 90]),
                torch.randn([32, 16, 8]),
                torch.randn([16]),
            ),
            check_lowp=False,
        )
    @skipIfRocm
    def test_convolution3(self):
        """Conv2d where stride/padding/dilation are 1-element lists."""
        # Test stride or padding or dilation is 1 element list.
        m = torch.nn.Sequential(
            torch.nn.Conv2d(5, 6, [3, 3], stride=[1], padding=[0], dilation=[1]),
            torch.nn.ReLU(),
            ToTuple(),
        )
        self.common(
            m,
            (torch.randn([2, 5, 16, 16]),),
            atol=6e-5,
            rtol=0.001,
        )
    def test_conv2d_channels_last(self):
        """Conv2d with channels_last on weight only, activation only, and both."""
        if self.device == "cuda":
            raise unittest.SkipTest("only support cpu conv2d channels_last")
        m = torch.nn.Sequential(
            torch.nn.Conv2d(3, 3, 1, 1),
            ToTuple(),
        )
        # only weight is channels_last
        self.common(
            m.to(memory_format=torch.channels_last),
            (torch.randn([2, 3, 16, 16]),),
            check_lowp=False,
        )
        # only activation is channels_last
        self.common(
            m,
            (torch.randn([2, 3, 16, 16]).to(memory_format=torch.channels_last),),
            check_lowp=False,
        )
        # activation and weight are all channels_last
        self.common(
            m.to(memory_format=torch.channels_last),
            (torch.randn([2, 3, 16, 16]).to(memory_format=torch.channels_last),),
            check_lowp=False,
        )
    def test_conv2d_backward_channels_last(self):
        """convolution_backward with a channels_last weight."""
        def fn(grad_output, inp, weight):
            convolution_backward_8 = torch.ops.aten.convolution_backward.default(
                grad_output,
                inp,
                weight,
                [320],
                [1, 1],
                [0, 0],
                [1, 1],
                False,
                [0, 0],
                1,
                [True, True, True],
            )
            return convolution_backward_8
        # only weight is channels_last
        self.common(
            fn,
            (
                torch.randn([2, 320, 8, 8]),
                torch.randn([2, 2048, 8, 8]),
                torch.randn([320, 2048, 1, 1]).to(memory_format=torch.channels_last),
            ),
            check_lowp=False,
        )
    def test_conv3d_channels_last(self):
        """Conv3d with channels_last_3d on weight only, activation only, and both."""
        if self.device == "cuda":
            raise unittest.SkipTest("only support cpu conv3d channels_last")
        m = torch.nn.Sequential(
            torch.nn.Conv3d(3, 3, 1, 1),
            ToTuple(),
        )
        # only weight is channels_last
        self.common(
            m.to(memory_format=torch.channels_last_3d),
            (torch.randn([2, 3, 16, 16, 16]),),
        )
        # only activation is channels_last
        self.common(
            m,
            (torch.randn([2, 3, 16, 16, 16]).to(memory_format=torch.channels_last_3d),),
        )
        # activation and weight are all channels_last
        self.common(
            m.to(memory_format=torch.channels_last_3d),
            (torch.randn([2, 3, 16, 16, 16]).to(memory_format=torch.channels_last_3d),),
        )
def test_adaptive_avg_pool2d1(self):
def fn(x):
return aten._adaptive_avg_pool2d(x, (6, 6)), aten._adaptive_avg_pool2d(
x + 1, (2, 5)
)
self.common(
fn,
(torch.randn(2, 4, 16, 16),),
check_lowp=False,
)
# lowering to avg_pool2d case
self.common(
fn,
(torch.randn(2, 4, 3, 3),),
)
# no-op case
self.common(
fn,
(torch.randn(2, 4, 6, 6),),
)
    def test_adaptive_avg_pool2d2(self):
        # Big kernel size, use fallback
        def fn(x):
            return aten._adaptive_avg_pool2d(x, (4, 4))
        # fallback (ATen) kernels are not counted as generated kernels,
        # so the counter must stay at 0
        torch._inductor.metrics.generated_kernel_count = 0
        self.common(
            fn,
            (torch.randn(2, 4, 21, 21),),
            check_lowp=False,
        )
        self.assertEqual(torch._inductor.metrics.generated_kernel_count, 0)
def test_adaptive_avg_pool2d_low_prec(self):
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
def forward(self, x):
x = self.avgpool(x)
return x
mod = Model()
for dtype in [torch.half, torch.bfloat16]:
x = torch.randn(4, 3, 7, 7).to(dtype=dtype)
opt_mod = torch.compile(mod)
res = opt_mod(x)
expected = mod(x)
self.assertTrue(torch.allclose(res, expected))
    def test_max_pool2d1(self):
        # max_pool2d_with_indices lowering: 3x3 kernel, stride 2, no padding.
        def fn(x):
            return aten.max_pool2d_with_indices(x, [3, 3], [2, 2])
        self.common(
            fn,
            (torch.randn(2, 4, 16, 16),),
        )
    def test_max_pool2d2(self):
        # Same pooling as test_max_pool2d1 on a larger, AlexNet-sized input.
        def fn(x):
            return aten.max_pool2d_with_indices(x, [3, 3], [2, 2])
        self.common(
            fn,
            (torch.randn([16, 64, 55, 55]),),
        )
    def test_max_pool2d3(self):
        # Padding handling, plus the single-element kernel/stride/padding list form.
        def fn(x):
            # with padding
            return (
                aten.max_pool2d_with_indices(x, [3, 3], [2, 2], [1, 1]),
                aten.max_pool2d_with_indices(
                    x,
                    [
                        3,
                    ],
                    [
                        2,
                    ],
                    [
                        1,
                    ],
                ),
            )
        # negative ramp input makes the argmax/index results deterministic
        self.common(
            fn,
            (-torch.arange(1 * 8 * 8, dtype=torch.float32).view(1, 1, 8, 8),),
        )
    def test_max_pool2d4(self):
        # ceil_mode=True with zero padding and dilation [1, 1].
        def fn(x):
            # with padding
            return aten.max_pool2d_with_indices(x, [3, 3], [2, 2], [0, 0], [1, 1], True)
        self.common(
            fn,
            (torch.randn([2, 8, 111, 111]),),
        )
    def test_max_pool2d5(self):
        # Empty stride list — per the max_pool2d contract, stride defaults to
        # the kernel size.
        def fn(x):
            return aten.max_pool2d_with_indices(x, [3, 3], [])
        self.common(
            fn,
            (torch.randn([16, 64, 55, 55]),),
        )
    def test_max_pool2d6(self):
        # Too big kernel size, use fallback
        def fn(x):
            return aten.max_pool2d_with_indices(x, [13, 13], [])
        # fallback kernels are not counted, so the counter must remain 0
        torch._inductor.metrics.generated_kernel_count = 0
        self.common(
            fn,
            (torch.randn([16, 64, 55, 55]),),
        )
        self.assertEqual(torch._inductor.metrics.generated_kernel_count, 0)
    # From https://github.com/pytorch/pytorch/issues/94775
    def test_max_pool2d7(self):
        # ceil mode turned on with a 1x1 kernel and stride 2
        def fn(x):
            return torch.nn.functional.max_pool2d(
                x, 1, stride=(2, 2), padding=0, ceil_mode=True
            )
        self.common(
            fn,
            (torch.randn([1, 1, 6, 7]),),
        )
    # From https://github.com/pytorch/pytorch/issues/93384
    def test_max_pool2d8(self):
        # dilation is not 1, use fallback
        def fn(x):
            return aten.max_pool2d_with_indices(x, [3, 2], [2, 1], [1, 1], [1, 2])
        # fallback kernels are not counted, so the counter must remain 0
        torch._inductor.metrics.generated_kernel_count = 0
        self.common(
            fn,
            (torch.randn([2, 2, 3, 6]),),
        )
        self.assertEqual(torch._inductor.metrics.generated_kernel_count, 0)
    def test_avg_pool2d1(self):
        # avg_pool2d lowering: 3x3 kernel, stride 2.
        def fn(x):
            return aten.avg_pool2d(x, [3, 3], [2, 2])
        self.common(
            fn,
            (torch.randn(2, 4, 16, 16),),
        )
    def test_avg_pool2d2(self):
        # Same pooling as test_avg_pool2d1 on a larger input.
        def fn(x):
            return aten.avg_pool2d(x, [3, 3], [2, 2])
        self.common(
            fn,
            (torch.randn([16, 64, 55, 55]),),
        )
    def test_avg_pool2d3(self):
        # Padding handling, plus the single-element kernel/stride/padding list form.
        def fn(x):
            return (
                aten.avg_pool2d(x, [3, 3], [2, 2], [1, 1]),
                aten.avg_pool2d(
                    x,
                    [
                        3,
                    ],
                    [
                        2,
                    ],
                    [
                        1,
                    ],
                ),
            )
        # deterministic negative ramp input
        self.common(
            fn,
            (-torch.arange(1 * 8 * 8, dtype=torch.float32).view(1, 1, 8, 8),),
        )
    def test_avg_pool2d4(self):
        # ceil_mode=True (fourth positional arg after padding).
        def fn(x):
            return aten.avg_pool2d(x, [3, 3], [2, 2], [0, 0], True)
        self.common(
            fn,
            (torch.randn([2, 8, 111, 111]),),
        )
    def test_avg_pool2d5(self):
        # count_include_pad=False: padded zeros excluded from the divisor.
        def fn(x):
            return aten.avg_pool2d(x, [3, 3], [2, 2], [1, 1], count_include_pad=False)
        self.common(
            fn,
            (-torch.arange(1 * 8 * 8, dtype=torch.float32).view(1, 1, 8, 8),),
        )
    def test_avg_pool2d6(self):
        # divisor_override: fixed divisor instead of the window element count.
        def fn(x):
            return aten.avg_pool2d(x, [3, 3], [2, 2], [1, 1], divisor_override=3)
        self.common(
            fn,
            (-torch.arange(1 * 8 * 8, dtype=torch.float32).view(1, 1, 8, 8),),
        )
    def test_avg_pool2d7(self):
        # Large kernel size, use fallback
        def fn(x):
            return aten.avg_pool2d(x, [13, 13], [1, 1], [0, 0])
        # fallback kernels are not counted, so the counter must remain 0
        torch._inductor.metrics.generated_kernel_count = 0
        self.common(
            fn,
            (-torch.arange(1 * 24 * 24, dtype=torch.float32).view(1, 1, 24, 24),),
        )
        self.assertEqual(torch._inductor.metrics.generated_kernel_count, 0)
    def test_avg_pool2d8(self):
        # https://github.com/pytorch/pytorch/issues/100987
        # ceil_mode=True combined with padding on a small input.
        def fn(x):
            return aten.avg_pool2d(
                x, kernel_size=3, stride=2, padding=1, ceil_mode=True
            )
        self.common(
            fn,
            (torch.randn(1, 3, 6, 6),),
        )
    def test_alexnet_prefix(self):
        # conv -> relu -> max_pool prefix of AlexNet with explicitly strided
        # cpu inputs; tolerances loosened per the recorded mismatches below.
        def forward(arg6, arg7, arg16):
            convolution = torch.ops.aten.convolution(
                arg16, arg7, arg6, [4, 4], [2, 2], [1, 1], False, [0, 0], 1
            )
            relu = torch.ops.aten.relu(convolution)
            max_pool2d_with_indices = torch.ops.aten.max_pool2d_with_indices(
                relu, [3, 3], [2, 2]
            )
            getitem = max_pool2d_with_indices[0]
            return (getitem,)
        self.common(
            forward,
            (
                rand_strided((64,), (1,), torch.float32, "cpu"),
                rand_strided((64, 3, 11, 11), (363, 121, 11, 1), torch.float32, "cpu"),
                rand_strided(
                    (16, 3, 224, 224), (150528, 50176, 224, 1), torch.float32, "cpu"
                ),
            ),
            # Mismatched elements: 127 / 746496 (0.0%)
            # Greatest absolute difference: 0.0009765625 at index (1, 62, 7, 16) (up to 1e-05 allowed)
            # Greatest relative difference: 0.05187467899332306 at index (14, 18, 11, 0) (up to 0.001 allowed)
            atol=1e-3,
            rtol=0.001,
        )
    def test_elu(self):
        # aten.elu with explicit alpha/scale(/input_scale) arguments, fused with adds.
        def fn(x):
            return aten.elu(x, 1.6732632423543772, 1.0507009873554805) + 2, aten.elu(
                x + 1, 2, 3, 4
            )
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_tan(self):
        # aten.tan fused with pointwise adds on either side.
        def fn(x):
            return aten.tan(x) + 2, aten.tan(x + 1)
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_tanh(self):
        # aten.tanh fused with pointwise adds on either side.
        def fn(x):
            return aten.tanh(x) + 2, aten.tanh(x + 1)
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_lgamma(self):
        # aten.lgamma lowering.
        # NOTE(review): the second tuple element uses aten.cos, not lgamma —
        # possibly deliberate (lgamma has poles at non-positive integers), but
        # worth confirming it wasn't a copy-paste from test_cos.
        def fn(x):
            return aten.lgamma(x) + 2, aten.cos(x + 1)
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_cos(self):
        # aten.cos fused with pointwise adds on either side.
        def fn(x):
            return aten.cos(x) + 2, aten.cos(x + 1)
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_sin(self):
        # aten.sin fused with pointwise adds on either side.
        def fn(x):
            return aten.sin(x) + 2, aten.sin(x + 1)
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_repeat(self):
        # Tensor.repeat with per-dim factors, a no-op dim, and extra leading dims.
        def fn(x):
            return (
                x.repeat(2, 2, 3, 1),
                x.repeat(8, 1, 1, 1),
                x.repeat(2, 1, 1, 1, 1, 1),
            )
        self.common(
            fn,
            (torch.randn([1, 2, 4, 8]),),
        )
    def test_repeat_interleave(self):
        # repeat_interleave: flattened form, explicit dim, and a dim-sized count.
        def fn(x):
            return (
                x.repeat_interleave(2),
                x.repeat_interleave(3, dim=0),
                x.repeat_interleave(x.size(1), dim=1),
            )
        self.common(
            fn,
            (torch.randn([1, 2, 4, 8]),),
        )
    def test_embedding(self):
        # nn.Embedding with padding_idx followed by ReLU, driven by integer indices.
        m = torch.nn.Sequential(
            torch.nn.Embedding(10, 4, padding_idx=0),
            torch.nn.ReLU(),
            ToTuple(),
        )
        self.common(
            m,
            (torch.randint(10, [2, 8]),),
        )
    def test_mean(self):
        # mean reductions: full, single negative dim, keepdim, and multi-dim.
        def fn(x):
            return (
                x.mean(),
                x.mean(-1),
                torch.mean(x, -2, keepdim=True),
                x.mean([0, 1]),
            )
        self.common(
            fn,
            (torch.randn([1, 2, 4, 8]),),
        )
    def test_var_mean(self):
        # var_mean over the last dim and over a multi-dim list.
        def fn(x):
            return (
                *torch.var_mean(x, -1),
                *torch.var_mean(x, [1, 3]),
            )
        self.common(
            fn,
            (torch.randn([1, 2, 4, 8]),),
        )
    @config.patch(pick_loop_orders=True)
    def test_transposed_propagates(self):
        # With pick_loop_orders on, the output of a pointwise op should keep
        # the (permuted) stride layout of its inputs.
        @torch._dynamo.optimize("inductor", nopython=True)
        def fn(x, y):
            return x + y
        a = torch.randn(1, 4, 4, 4, device=self.device).permute(0, 2, 3, 1)
        b = torch.randn(4, 4, 4, device=self.device).permute(1, 2, 0)
        c = fn(a, b)
        self.assertEqual(a.stride(), c.stride())
        self.assertEqual(c.stride()[2], 1)
    def test_std(self):
        # var/std with unbiased True/False, dim variants and keepdim.
        def fn(x):
            return (
                torch.var(x, True),
                torch.var(x, False),
                torch.var(x, -1, True),
                torch.var(x, -1, False),
                torch.std(x, False),
                torch.std(x, [0, 1], True),
                torch.std(x, [0, 1], False),
                torch.std(x, -2, True, keepdim=True),
            )
        self.common(
            fn,
            (torch.randn([2, 4, 4, 8]),),
        )
    def test_embedding_bag(self):
        # aten._embedding_bag with explicit offsets (weights, indices, offsets).
        def fn(w, i, o):
            return aten._embedding_bag(w, i, o, False, 0, False, None)
        self.common(
            fn,
            (torch.randn([10, 4]), torch.randint(10, [8]), torch.tensor([0, 2, 6])),
        )
    def test_batch_norm_2d(self):
        # BatchNorm2d + ReLU in eval mode (uses running stats), two input sizes.
        m = torch.nn.Sequential(
            torch.nn.BatchNorm2d(10),
            torch.nn.ReLU(),
        )
        m.eval()
        self.common(m, (torch.randn([2, 10, 8, 8]),), check_lowp=False)
        self.common(
            m,
            (torch.randn([3, 10, 16, 16]),),
            check_lowp=False,  # too painful to match types of bn model
        )
    def test_layer_norm(self):
        # LayerNorm + ReLU must fuse into a single generated kernel on GPU.
        m = torch.nn.Sequential(
            torch.nn.LayerNorm(32),
            torch.nn.ReLU(),
        )
        m.eval()
        self.common(m, (torch.randn([16, 32]),), check_lowp=False)
        if self.device != "cpu":
            self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
    def test_transpose_add(self):
        # transpose + add should still be a single generated kernel on GPU.
        def fn(a, b):
            return a.t() + b
        self.common(
            fn, (torch.randn([16, 32]), torch.randn([32, 16])), check_lowp=False
        )
        if self.device != "cpu":
            self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
    @patch.object(config.triton, "persistent_reductions", True)
    def test_softmax_one_kernel_persist(self):
        # Hand-written softmax must compile to ONE kernel with persistent
        # reductions enabled (GPU only).
        def fn(x):
            dim = 1
            x_max = torch.amax(x, dim, keepdim=True)
            unnormalized = torch.exp(x - x_max)
            result = unnormalized / torch.sum(unnormalized, dim, keepdim=True)
            return result
        self.common(fn, (torch.randn([16, 32]),), check_lowp=False)
        if self.device != "cpu":
            self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
    @patch.object(config.triton, "persistent_reductions", False)
    def test_softmax_one_kernel_loop(self):
        # Same softmax as above, but forced down the loop-reduction path;
        # still expects a single generated kernel on GPU.
        def fn(x):
            x_max = torch.amax(x, 1, keepdim=True)
            unnormalized = torch.exp(x - x_max)
            result = unnormalized / torch.sum(unnormalized, 1, keepdim=True)
            return result
        self.common(fn, (torch.randn([16, 32]),), check_lowp=False)
        if self.device != "cpu":
            self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
    def test_complex_fallback(self):
        # complex64 math falls back to ATen (no generated kernels), while the
        # real-valued prefix of ToComplex still compiles to one kernel on GPU.
        def fn(x):
            return x * x + 10
        self.common(
            fn,
            (torch.randn([1, 2, 4, 8]).to(dtype=torch.complex64),),
        )
        self.assertEqual(torch._inductor.metrics.generated_kernel_count, 0)
        class ToComplex(nn.Module):
            def forward(self, x):
                return (x + x + 12).to(torch.complex64)
        self.common(ToComplex(), (torch.rand([1, 2, 4, 8]),), check_lowp=False)
        if self.device != "cpu":
            self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
    def test_view_as_complex(self):
        # view_as_complex after a contiguous clone of a non-contiguous strided
        # input; compiled output must match eager.
        class Repro(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, view_2):
                clone = torch.ops.aten.clone.default(
                    view_2, memory_format=torch.contiguous_format
                )
                view_2 = None
                view_as_complex = torch.ops.aten.view_as_complex.default(clone)
                clone = None
                return (view_as_complex,)
        inp = torch.empty_strided((128, 64, 12, 32, 2), (1, 98304, 8192, 256, 128)).to(
            self.device
        )
        mod = Repro()
        o1 = mod(inp)
        o2 = torch.compile(mod)(inp)
        self.assertEqual(o1, o2)
    def test_cauchy(self):
        # Cauchy-matrix style broadcasted reciprocal + sum; should fuse into a
        # single kernel on GPU. Tolerances loosened per the recorded diffs.
        def fn(x, y):
            return torch.sum(1 / (torch.unsqueeze(x, -1) - y))
        self.common(
            fn,
            (
                torch.randn(32),
                torch.randn(32),
            ),
            # Absolute difference: 0.0003662109375 (up to 0.0001 allowed)
            # Relative difference: 1.8804297408767818e-05 (up to 1e-05 allowed)
            atol=5 * 1e-4,
            rtol=5 * 1e-5,
            check_lowp=False,
        )
        if self.device != "cpu":
            self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
    def test_gather_scatter(self):
        # GNN-style gather along edges followed by scatter_add_ back to nodes;
        # expects exactly two generated kernels on GPU.
        def fn(node_feat, edge_index):
            src_node_feat = node_feat[edge_index[0]]
            dst_node_feat = node_feat[edge_index[1]]
            edge_feat = src_node_feat - dst_node_feat + 1
            new_node_feat = torch.zeros_like(node_feat)
            new_node_feat.scatter_add_(
                0, edge_index[1].unsqueeze(-1).expand_as(edge_feat), edge_feat
            )
            return new_node_feat
        num_nodes = 16
        num_features = 32
        node_feat = torch.randn(num_nodes, num_features)
        edge_index = torch.randint(0, num_nodes, size=(2, num_nodes * 5))
        self.common(
            fn,
            (
                node_feat,
                edge_index,
            ),
            check_lowp=False,
        )
        if self.device != "cpu":
            self.assertEqual(torch._inductor.metrics.generated_kernel_count, 2)
@config.patch(max_fusion_size=1)
def test_no_mega_fusion_during_lowering(self):
n = 50
def fn(*args):
x = args[0]
for i in range(n):
x = torch.add(x, args[i])
return x
self.common(
fn,
[torch.randn(64) for _ in range(n)],
check_lowp=False,
)
print("-->", torch._inductor.metrics.generated_kernel_count)
if self.device != "cpu":
self.assertTrue(torch._inductor.metrics.generated_kernel_count > 1)
    def test_move_arange(self):
        # arange created on cpu then moved to the target device should be
        # constant-folded/moved so no extra copy kernel is generated.
        def fn(x):
            return torch.arange(len(x), device="cpu").to(x.device) + x
        self.common(fn, (torch.randn([32]),), check_lowp=False)
        # if we have a copy there will be more than 1 kernel
        self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
    def test_leaky_relu(self):
        # leaky_relu with an explicit negative slope and with the default slope.
        def fn(x):
            return aten.leaky_relu(x, 0.2) + 2, aten.leaky_relu(x + 1)
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_gelu(self):
        # aten.gelu fused with pointwise adds on either side.
        def fn(x):
            return aten.gelu(x) + 2, aten.gelu(x + 1)
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_clone(self):
        # aten.clone fused with pointwise adds on either side.
        def fn(x):
            return aten.clone(x) + 2, aten.clone(x + 1)
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_masked_fill(self):
        # masked_fill with a broadcasting bool mask and its logical negation.
        def fn(mask, value):
            return aten.masked_fill(value, mask, -10000.0) + 2, aten.masked_fill(
                value / 2.0, torch.logical_not(mask), 667
            )
        self.common(
            fn,
            (
                torch.randint(0, 1, [1, 16], dtype=torch.bool),
                torch.randn([16, 16]),
            ),
        )
    def test_masked_fill_promotion(self):
        # masked_fill with a float tensor fill value: the result must follow
        # eager's type-promotion for both float and integer self tensors.
        def fn(mask, value):
            return aten.masked_fill(value, mask, torch.tensor(3.5))
        opt_fn = torch._dynamo.optimize("inductor")(fn)
        for inp in (
            torch.randn(
                [16, 16],
                dtype=torch.float16 if self.device == "cuda" else torch.float32,
                device=self.device,
            ),
            torch.randint(16, (16, 16), device=self.device),
        ):
            inputs = (
                torch.randint(0, 1, [1, 16], dtype=torch.bool, device=self.device),
                inp,
            )
            self.assertEqual(fn(*inputs), opt_fn(*inputs))
    def test_fill1(self):
        # aten.fill.Scalar on a freshly created tensor.
        def fn(x):
            tmp = torch.ones_like(x)
            return tmp, aten.fill.Scalar(tmp, 2)
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_fill2(self):
        # aten.fill.Tensor (0-dim tensor fill value) on a freshly created tensor.
        def fn(x):
            tmp = torch.ones_like(x)
            return tmp, aten.fill.Tensor(tmp, torch.tensor(3.0))
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_pow1(self):
        # Integer exponents from -8 to 8 (covers the small-exponent special cases).
        def fn(x):
            return [aten.pow(x, e) for e in range(-8, 9)]
        self.common(
            fn,
            (torch.randn([16, 16]),),
        )
    def test_pow2(self):
        # Large scalar base and large scalar exponent (overflow-prone values).
        def fn(x):
            return aten.pow(1000, x), aten.pow(x, 1000)
        self.common(
            fn,
            # TODO: Remove dtype once https://github.com/pytorch/pytorch/issues/94010 is fixed
            (
                torch.randn(
                    [16, 16],
                    dtype=torch.float64 if self.device == "cpu" else torch.float32,
                ),
            ),
            # Mismatched elements: 9 / 256 (3.5%)
            # Greatest absolute difference: 2.491354329061828e+28 at index (6, 6) (up to 1e-05 allowed)
            # Greatest relative difference: 2.9793410720160818e-05 at index (4, 5) (up to 1.3e-06 allowed)
            atol=1e-5,
            rtol=3e-05,
        )
def test_pow3(self):
# power of 0.5 is special-cased, arbitrary power would still produce triton codegen error
def fn(x):
z = torch.tensor(0.123, device=self.device)
w = z + x
return torch.pow(w, 0.5)
opt = torch._dynamo.optimize("inductor")(fn)
input = torch.rand(())
self.assertTrue(same(opt(input), fn(input)))
    def test_pow_int(self):
        # Integer pow for every integer dtype, with a constant exponent and a
        # tensor exponent bounded by the dtype's max.
        def fn(x, y):
            return torch.pow(x, 0x57), torch.pow(x, y)
        for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
            intmax = torch.iinfo(dtype).max
            make_arg = functools.partial(
                make_tensor, dtype=dtype, device="cpu", requires_grad=False
            )
            self.common(
                fn,
                (
                    make_arg(16, 16),
                    make_arg(16, 16, high=intmax),
                ),
            )
    def test_glu(self):
        # aten.glu along the last, second and third dimensions.
        def fn(x):
            return aten.glu(x, -1), aten.glu(x, 1), aten.glu(x, 2)
        self.common(
            fn,
            (torch.randn([8, 16, 8, 8]),),
        )
    def test_cat(self):
        # torch.cat: mixed slices, different dims, dtype promotion (float+double),
        # in both contiguous and channels_last layouts.
        def fn(a):
            tmp = a * 2
            return (
                torch.cat((a, a[:, :4] + 1, a + 2), -1),
                torch.cat((tmp, tmp), 0),
                torch.cat((tmp, tmp.double()), 0),
            )
        self.common(
            fn,
            (torch.randn([8, 16]),),
        )
        self.common(
            fn,
            (torch.randn([1, 3, 3, 16]).to(memory_format=torch.channels_last),),
        )
    def test_cat_upcasting(self):
        # cat of float32 with float16 must upcast like eager.
        def fn(arg4_1, slice_7):
            cat_1 = aten.cat.default([arg4_1, slice_7], 1)
            return (cat_1,)
        self.common(
            fn,
            (
                torch.randn([8, 16], dtype=torch.float32),
                torch.randn([8, 20], dtype=torch.float16),
            ),
        )
    def test_cat_extern_kernel(self):
        # cat where one operand comes from extern matmul kernels (mm + narrow + mm).
        def fn(x1, x2, x3, x4):
            x = torch.mm(x2, x3)
            s = torch.narrow(x, 1, 0, 100)
            x = torch.mm(s, x4)
            c = torch.cat((x, x1), 1)
            return (c,)
        self.common(
            fn,
            (
                torch.randn(256, 256),
                torch.randn(256, 1024),
                torch.randn(1024, 1600),
                torch.randn(100, 256),
            ),
            check_lowp=False,  # accuracy issues with relatively large matmuls
        )
    def test_cat_of_loops_and_extern_kernel(self):
        # cat of a conv output (extern kernel) and a max_pool output (loop kernel)
        # with channels_last inputs; both values and output strides must match eager.
        class M(torch.nn.Module):
            def __init__(
                self,
                **kwargs,
            ):
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    64,
                    5,
                    1,
                    **kwargs,
                )
                self.max_pool2d = torch.nn.MaxPool2d(2)
            def forward(self, x, y):
                x1 = self.conv(x)
                y1 = self.max_pool2d(y)
                return torch.cat([x1, y1], 1)
        mod = M()
        opt_mod = torch._dynamo.optimize("inductor")(mod)
        memory_format = torch.channels_last
        inputs = (
            torch.randn([1, 64, 16, 16]).to(memory_format=memory_format),
            torch.randn([1, 64, 32, 32]).to(memory_format=memory_format),
        )
        y = mod(*inputs)
        opt_y = opt_mod(*inputs)
        self.assertEqual(y, opt_y)
        self.assertEqual(y.stride(), opt_y.stride())
def test_cat_inplace(self):
def fn(x):
rt = torch.cat([x])
v = x.sin_()
return rt
# can't use self.common because input is modified inplace
inp = torch.ones(2)
opt_fn = torch.compile(fn)
res = opt_fn(inp.clone())
expected = fn(inp.clone())
self.assertEqual(res, expected)
    def test_stack(self):
        # torch.stack of two broadcast-expanded views along a new trailing dim.
        def fn(a, b):
            return torch.stack(
                [
                    a.expand(12, 16),
                    b.expand(12, 16),
                ],
                2,
            )
        self.common(fn, (torch.randn([1, 16]), torch.randn([12, 1])))
    def test_hardtanh(self):
        # F.hardtanh around zero and shifted to hit both clamp boundaries.
        def fn(x):
            return F.hardtanh(x), F.hardtanh(x + 1), F.hardtanh(x - 1)
        self.common(
            fn,
            (torch.randn([64]),),
        )
    def test_hardsigmoid(self):
        # F.hardsigmoid, shifted by ±3 to hit both saturation regions.
        def fn(x):
            return F.hardsigmoid(x), F.hardsigmoid(x + 3), F.hardsigmoid(x - 3)
        self.common(
            fn,
            (torch.randn([64]),),
        )
    def test_hardswish(self):
        # F.hardswish, shifted by ±3 to hit both saturation regions.
        def fn(x):
            return F.hardswish(x), F.hardswish(x + 3), F.hardswish(x - 3)
        self.common(
            fn,
            (torch.randn([64]),),
        )
    def test_rsqrt(self):
        # torch.rsqrt fused with pointwise add/sub.
        def fn(x):
            return torch.rsqrt(x), torch.rsqrt(x + 1) - 2
        self.common(
            fn,
            (torch.randn([64]),),
        )
    def test_expm1(self):
        # expm1 across float and integer dtypes, including tiny values near 0
        # where expm1's extra precision over exp(x)-1 matters.
        def fn(x):
            return torch.expm1(x), torch.expm1(x) * 2
        for dtype in (torch.float16, torch.float, torch.double, torch.int, torch.int64):
            self.common(
                fn,
                (torch.randn([64]).to(dtype=dtype),),
            )
            self.common(
                fn,
                (torch.arange(-1e-5, 1e-5, 1e-7).to(dtype=dtype),),
            )
    def test_log1p(self):
        # log1p across float and integer dtypes, including tiny values near 0
        # where log1p's extra precision over log(1+x) matters.
        def fn(x):
            return torch.log1p(x), torch.log1p(x) * 2
        for dtype in (torch.float16, torch.float, torch.double, torch.int, torch.int64):
            self.common(
                fn,
                (torch.randn([64]).to(dtype=dtype),),
            )
            self.common(
                fn,
                (torch.arange(-1e-5, 1e-5, 1e-7).to(dtype=dtype),),
            )
    def test_flip(self):
        # torch.flip over the last dim and over a multi-dim tuple.
        def fn(x):
            return torch.flip(x, (-1,)), torch.flip(x, (0, 2)) - 2
        self.common(
            fn,
            (torch.randn([1, 2, 6, 6]),),
        )
    def test_signbit(self):
        # signbit plus bool bitwise-not/and on the negated input.
        def fn(x):
            return torch.signbit(x), ~torch.signbit(-x) & 1
        self.common(
            fn,
            (torch.randn([1, 2, 6, 6]),),
        )
    def test_sign_dtype(self):
        # sign's output dtype must be preserved when fed into a float op.
        def fn(x):
            y = torch.sign(x)
            return torch.tanh(y)
        self.common(fn, (torch.randn([1, 2, 6, 6]),))
def test_fmod(self):
def fn(a, b):
return torch.fmod(a, b), torch.fmod(3.0 * a, b) - 2.0
shape = [1, 2, 6, 6]
self.common(fn, (torch.randn(shape), torch.randn(shape)))
    def test_fmod_zero_dim(self):
        # fmod with a 0-dim tensor on either side of the broadcast.
        def fn(a, b):
            return (torch.fmod(a, b),)
        self.common(
            fn,
            (
                make_tensor(10, device="cpu", dtype=torch.float32),
                make_tensor((), device="cpu", dtype=torch.float32),
            ),
        )
        self.common(
            fn,
            (
                make_tensor((), device="cpu", dtype=torch.float32),
                make_tensor(10, device="cpu", dtype=torch.float32),
            ),
        )
    def test_log2(self):
        # log2 on strictly positive input (randn shifted by +10).
        def fn(x):
            return torch.log2(x), torch.log2(x + 1) - 2
        self.common(
            fn,
            (torch.randn([64]) + 10,),
        )
    def test_logsumexp(self):
        # logsumexp reduction over the last dim and over dim 0.
        def fn(x):
            return torch.logsumexp(x, -1), torch.logsumexp(x, 0) - 2
        self.common(
            fn,
            (torch.randn([8, 8]) + 10,),
        )
    def test_log_fp64(self):
        # log/log2 in float64 (double-precision codegen path).
        def fn(x):
            return torch.log(x), torch.log2(x)
        self.common(
            fn,
            (torch.randn([1024], dtype=torch.float64) + 10,),
        )
    def test_bitwise(self):
        # bitwise not/or/xor/and on int32 inputs.
        def fn(x, y):
            return (
                torch.bitwise_not(x),
                torch.bitwise_or(x, y),
                torch.bitwise_xor(x, y),
                torch.bitwise_and(x, y),
            )
        self.common(
            fn,
            (
                torch.randint(0, 2**30, [64], dtype=torch.int32),
                torch.randint(0, 2**30, [64], dtype=torch.int32),
            ),
        )
    def test_bitwise2(self):
        # again with bool types
        def fn(x, y):
            return (
                torch.bitwise_not(x),
                torch.bitwise_or(x, y),
                torch.bitwise_xor(x, y),
                torch.bitwise_and(x, y),
            )
        self.common(
            fn,
            (
                torch.randint(0, 2, (2, 20), dtype=torch.bool),
                torch.randint(0, 2, (2, 20), dtype=torch.bool),
            ),
        )
    def test_bitwise3(self):
        # Repro for https://github.com/pytorch/pytorch/issues/97968
        # bitwise ops feeding max/clamp, with broadcasting int8 operands.
        def fn(x, y):
            return (
                torch.max(torch.bitwise_and(x, y), y),
                torch.clamp_max(torch.bitwise_or(x, y), y),
                torch.clamp_min(torch.bitwise_xor(x, y), y),
            )
        self.common(
            fn,
            (
                torch.rand([5, 10, 1]).to(torch.int8),
                torch.rand([10, 1]).to(torch.int8),
            ),
        )
def test_inf(self):
def fn(a):
return a + float("inf"), a + float("-inf"), a * -float("inf")
self.common(fn, (torch.randn(8),))
    def test_remainder(self):
        # torch.remainder on plain and shifted operands (sign handling).
        def fn(a, b):
            return (
                torch.remainder(a, b),
                torch.remainder(a + 1, b - 1),
                torch.remainder(a - 1, b + 1),
            )
        self.common(fn, (torch.randn(64), torch.randn(64)))
    def test_zeros(self):
        # Tensor-creation ops inside a compiled region: zeros (tuple and varargs
        # forms), zeros with names=None, ones, and full.
        def fn(a):
            return (
                a + 1,
                torch.zeros(
                    (1, 8, 64, 64),
                    dtype=torch.float32,
                    device=a.device,
                ),
                torch.zeros(
                    1,
                    8,
                    64,
                    64,
                    dtype=torch.float32,
                    device=a.device,
                ),
                torch.zeros(2, 3, names=None),
                a + torch.ones(8, device=a.device),
                torch.full((2, 3), 3.1416, device=a.device),
            )
        self.common(fn, (torch.randn(8),))
    def test_new_ones(self):
        # new_ones / new_zeros with explicit integer dtype/layout codes.
        # NOTE(review): dtype=6 / layout=0 are the raw ScalarType/Layout enum
        # values (presumably float32 / strided) — confirm against the enums.
        def fn(a):
            return (
                aten.new_ones(
                    a, [], device=a.device, dtype=6, layout=0, pin_memory=False
                ),
                aten.new_zeros(
                    a, [], device=a.device, dtype=6, layout=0, pin_memory=False
                ),
            )
        self.common(fn, (torch.randn(8),))
def test_full_like(self):
def fn(a):
return torch.full_like(a, 7.777) - 1
self.common(fn, (torch.randn(8),))
    def test_full_truncation(self):
        # full_like's fill value must be truncated to the input's dtype for
        # every dtype in all_types().
        def fn(a):
            return a + torch.full_like(a, 7.777)
        for dtype in all_types():
            self.common(fn, (make_tensor(8, dtype=dtype, device="cpu"),))
    def test_index1(self):
        # aten.index with two index tensors: aligned 1-D indices and
        # broadcasting 2-D indices.
        def fn(a, b, c):
            return aten.index(a, [b, c])
        self.common(
            fn,
            (
                torch.randn(8, 8, 12),
                torch.tensor([0, 0, 2, 2], dtype=torch.int64),
                torch.tensor([3, 4, 4, 3], dtype=torch.int64),
            ),
        )
        self.common(
            fn,
            (
                torch.randn(8, 8, 12),
                torch.tensor([[0, 0, 2, 2]], dtype=torch.int64),
                torch.tensor([[3], [4], [4], [3]], dtype=torch.int64),
            ),
        )
    def test_index2(self):
        # aten.index on the first dim and on a non-leading dim (None prefix).
        def fn(a, b):
            return (
                aten.index(a, [b]),
                aten.index(a, [None, b]),
            )
        self.common(
            fn,
            (
                torch.randn(8, 8, 8),
                torch.tensor([[0, 0, 2, 2]], dtype=torch.int64),
            ),
        )
    def test_index3(self):
        # Mixed advanced indexing: slice, tensor indices, None (new axis) and
        # an integer index in one subscript.
        def fn(x, ia, ib):
            return (x[:, ia, None, ib, 0],)
        self.common(
            fn,
            (
                torch.randn(3, 4, 4, 4, 3),
                torch.tensor([0, 2, 1], dtype=torch.int64),
                torch.tensor([0, 2, 1], dtype=torch.int64),
            ),
        )
    def test_output_strides(self):
        # Output strides across a graph break must match eager, and a pure
        # view chain must not introduce a copy (same storage in and out).
        def fn(x):
            y = x.permute(0, 2, 3, 1).contiguous()
            torch._dynamo.graph_break()
            return y.view(-1, 4)
        inp = torch.rand([4, 4, 4, 4], device=self.device)
        fn_opt = torch._dynamo.optimize("inductor")(fn)
        self.assertEqual(fn(inp), fn_opt(inp))
        self.assertEqual(fn(inp).stride(), fn_opt(inp).stride())
        # no redundant copy
        def foo(x):
            return x[0:2:2].T[3:].squeeze(0)
        foo_opt = torch._dynamo.optimize("inductor")(foo)
        out = foo_opt(inp)
        self.assertEqual(inp.storage(), out.storage())
    def test_index_select(self):
        # index_select over several dims and nested, for int32 and int64 indices.
        def fn(a, b):
            return (
                torch.index_select(a, 0, b),
                torch.index_select(a, 1, b),
                torch.index_select(torch.index_select(a, 2, b), 1, b),
            )
        for ind_dtype in (torch.int32, torch.int64):
            self.common(
                fn,
                (
                    torch.randn(8, 8, 8),
                    torch.tensor([0, 0, 2, 1], dtype=ind_dtype),
                ),
            )
    @skipIfRocm
    def test_cudnn_rnn(self):
        # aten._cudnn_rnn with a flat list of 16 weight/bias tensors (CUDA only);
        # half-precision checking is off because RNN drift is too large.
        if self.device == "cpu":
            raise unittest.SkipTest("requires CUDA")
        def fn(
            a0,
            b0,
            b1,
            b2,
            b3,
            b4,
            b5,
            b6,
            b7,
            b8,
            b9,
            b10,
            b11,
            b12,
            b13,
            b14,
            b15,
            a3,
            a4,
            a5,
        ):
            # weights are passed flat and regrouped into the list argument
            a1 = [
                b0,
                b1,
                b2,
                b3,
                b4,
                b5,
                b6,
                b7,
                b8,
                b9,
                b10,
                b11,
                b12,
                b13,
                b14,
                b15,
            ]
            return aten._cudnn_rnn(
                a0,
                a1,
                4,
                a3,
                a4,
                a5,
                2,
                2048,
                0,
                2,
                False,
                0.0,
                False,
                True,
                [],
                None,
            )
        self.common(
            fn,
            (
                torch.randn([92, 8, 2048]),
                torch.randn([8192, 2048]),
                torch.randn([8192, 2048]),
                torch.randn([8192]),
                torch.randn([8192]),
                torch.randn([8192, 2048]),
                torch.randn([8192, 2048]),
                torch.randn([8192]),
                torch.randn([8192]),
                torch.randn([8192, 4096]),
                torch.randn([8192, 2048]),
                torch.randn([8192]),
                torch.randn([8192]),
                torch.randn([8192, 4096]),
                torch.randn([8192, 2048]),
                torch.randn([8192]),
                torch.randn([8192]),
                torch.randn([167837696]),
                torch.randn([4, 8, 2048]),
                torch.randn([4, 8, 2048]),
            ),
            check_lowp=False,  # difference in rnn is too large between half and float inputs
        )
    def test_upsample_nearest1d(self):
        # upsample_nearest1d: explicit output sizes (up and down) and scale factor.
        def fn(a):
            return (
                aten.upsample_nearest1d(a, [74], None),
                aten.upsample_nearest1d(a, [70], None),
                aten.upsample_nearest1d(a, [45], None),
                aten.upsample_nearest1d(a, [36], None),
                aten.upsample_nearest1d(a, None, [2.0]),
            )
        self.common(fn, (torch.randn([2, 4, 37]),))
    def test_upsample_nearest2d(self):
        # upsample_nearest2d: explicit output sizes (up and down) and scale factors.
        def fn(a):
            return (
                aten.upsample_nearest2d(a, [74, 76]),
                aten.upsample_nearest2d(a, [70, 75]),
                aten.upsample_nearest2d(a, [45, 74]),
                aten.upsample_nearest2d(a, [36, 39]),
                aten.upsample_nearest2d(a, None, [2.0, 2.0]),
            )
        self.common(fn, (torch.randn([2, 4, 37, 38]),))
    def test_upsample_nearest3d(self):
        # upsample_nearest3d: explicit output sizes (up and down) and scale factors.
        def fn(a):
            return (
                aten.upsample_nearest3d(a, [74, 76, 78], None),
                aten.upsample_nearest3d(a, [70, 75, 80], None),
                aten.upsample_nearest3d(a, [45, 74, 103], None),
                aten.upsample_nearest3d(a, [36, 39, 40], None),
                aten.upsample_nearest3d(a, None, [2.0, 2.0, 2.0]),
            )
        self.common(fn, (torch.randn([2, 4, 37, 38, 39]),))
def test_upsample_nearest2d_backward(self):
func = torch.ops.aten.upsample_nearest2d_backward
def fn(a):
return (
func(a, output_size=[6, 12], input_size=[3, 3, 3, 6]),
func(a, output_size=[6, 12], input_size=[3, 3, 4, 5]),
func(a, output_size=[6, 12], input_size=[3, 3, 2, 8]),
func(a, output_size=[6, 12], input_size=[3, 3, 2, 8]),
func(a, output_size=[6, 12], input_size=[3, 3, 4, 7]),
)
self.common(fn, (torch.randn([3, 3, 6, 12]),))
    @skip_if_x86_mac()
    def test_upsample_bilinear2d_a(self):
        # upsample_bilinear2d with explicit size (align_corners=False) and
        # with scale factors (align_corners=True).
        def fn(a):
            return (
                aten.upsample_bilinear2d(a, [45, 45], False, None),
                aten.upsample_bilinear2d(a, None, True, [2.0, 2.0]),
            )
        self.common(fn, (torch.randn([2, 4, 37, 38]),), atol=2.5e-5, rtol=1.3e-6)
    def test_upsample_bilinear2d_b(self):
        # upsample_bilinear2d with scale factors only, align_corners=True.
        def fn(a):
            return aten.upsample_bilinear2d(a, None, True, [2.0, 2.0])
        self.common(
            fn,
            [
                torch.randn([1, 2, 40, 59]),
            ],
            atol=2.5e-5,
            rtol=1.3e-6,
        )
    def test_reflection_pad2d(self):
        # reflection_pad2d with symmetric and asymmetric padding.
        def fn(a):
            return (
                aten.reflection_pad2d(a, [1, 1, 1, 1]),
                aten.reflection_pad2d(a, [1, 2, 3, 4]),
            )
        self.common(
            fn, (torch.randint(0, 999, size=[1, 1, 8, 8], dtype=torch.float32),)
        )
    def test_reflection_pad2d_backward(self):
        # reflection_pad2d_backward for symmetric, asymmetric and negative
        # paddings; grad_output is shaped by running the forward pad in eager.
        def template(size, padding):
            def fn(grad_output, x):
                return aten.reflection_pad2d_backward(grad_output, x, padding)
            x = torch.randint(0, 999, size=size, dtype=torch.float32)
            result = aten.reflection_pad2d(x, padding)
            grad_output = torch.randn_like(result)
            self.common(fn, (grad_output, x))
        template([1, 1, 8, 8], [0, 0, 0, 0])
        template([1, 1, 8, 8], [1, 1, 1, 1])
        template([1, 1, 8, 8], [1, 2, 3, 4])
        template([1, 1, 8, 8], [0, -1, 2, 2])
        template([1, 1, 8, 8], [-1, 0, 2, 2])
        template([1, 1, 8, 8], [2, 2, 0, -1])
        template([1, 1, 8, 8], [2, 2, -1, 0])
    def test_grid_sampler_2d(self):
        # grid_sampler_2d with two interpolation/padding/align_corners combos;
        # grid values are scaled into the [-1, 1] sampling range.
        def fn(a, b):
            return (
                aten.grid_sampler_2d(a, b, 0, 0, True),
                aten.grid_sampler_2d(a, b, 0, 1, False),
            )
        self.common(
            fn,
            (
                torch.randn([4, 3, 352, 352], dtype=torch.float32),
                torch.rand([4, 352, 352, 2], dtype=torch.float32) * 2 - 1,
            ),
            check_lowp=False,
            # Mismatched elements: 154697 / 1486848 (10.4%)
            # Greatest absolute difference: 0.0001976490020751953 at index (0, 0, 101, 243) (up to 1e-05 allowed)
            # Greatest relative difference: 7.332530120481928 at index (1, 1, 258, 301) (up to 1.3e-06 allowed)
            atol=0.0002,
            rtol=1.3e-06,
        )
    def test_upsample_bicubic2d(self):
        # upsample_bicubic2d with align_corners True and False.
        def fn(a):
            return (
                aten.upsample_bicubic2d(a, (128, 128), True),
                aten.upsample_bicubic2d(a, (128, 256), False),
            )
        # Mismatched elements: 10 / 196608 (0.0%)
        # Greatest absolute difference: 1.3869255781173706e-05 at index (2, 1, 88, 65) (up to 1e-05 allowed)
        # Greatest relative difference: 0.0033082996811011046 at index (3, 1, 88, 91) (up to 1.3e-06 allowed)
        self.common(
            fn,
            (torch.randn([4, 3, 64, 32], dtype=torch.float32),),
            atol=2e-5,
            rtol=1e-3,
        )
    def test_sort(self):
        # torch.sort (values and indices) on distinct integer-valued floats.
        def fn(a):
            return torch.sort(a)
        self.common(
            fn, (torch.randint(0, 999, size=[1, 1, 8, 8], dtype=torch.float32),)
        )
    def test_topk(self):
        # torch.topk (k=2) along the last dim.
        def fn(a):
            return torch.topk(a, 2, -1)
        self.common(
            fn, (torch.randint(0, 999, size=[1, 1, 8, 8], dtype=torch.float32),)
        )
    def test_long_tensor(self):
        # int64 constants created via LongTensor / as_tensor inside the graph.
        def fn(a):
            return (
                torch.LongTensor([294]).to(a.device) - a,
                torch.as_tensor([295]).to(a.device) + a,
            )
        self.common(fn, (torch.randint(0, 999, size=[8, 8]),))
    def test_constant_pad_1d(self):
        # constant_pad_nd in 1-D with non-zero fill values.
        def fn(a):
            return (
                aten.constant_pad_nd(a, [0, 1], 6.0),
                aten.constant_pad_nd(a, [2, 3], 99.0),
            )
        self.common(fn, (torch.randint(0, 999, size=[2, 16, 31], dtype=torch.float32),))
    def test_constant_pad_fill_dtype(self):
        # Fill value dtype handling: float fill on a bool tensor, combined with
        # a boolean `&`.
        def fn(a, b):
            return (
                aten.constant_pad_nd(a, (1, 1), 1.0) & b,
                aten.constant_pad_nd(a, (1, 1), 0.0) & b,
            )
        self.common(
            fn,
            (torch.randint(2, (4,), dtype=torch.bool), torch.ones(6, dtype=torch.bool)),
        )
    def test_constant_pad_2d(self):
        # constant_pad_nd in 2-D with symmetric and asymmetric padding.
        def fn(a):
            return (
                aten.constant_pad_nd(a, [1, 1, 1, 1], 6.0),
                aten.constant_pad_nd(a, [1, 2, 3, 4], 99.0),
            )
        self.common(
            fn, (torch.randint(0, 999, size=[1, 1, 8, 8], dtype=torch.float32),)
        )
    def test_constant_pad_3d(self):
        # constant_pad_nd in 3-D, including zero padding on some dims.
        def fn(a):
            return (
                aten.constant_pad_nd(a, [1, 2, 3, 4, 5, 6], 6.0),
                aten.constant_pad_nd(a, [0, 0, 3, 4, 0, 0], 6.0),
            )
        self.common(
            fn, (torch.randint(0, 999, size=[2, 4, 4, 4], dtype=torch.float32),)
        )
def test_constant_pad_float64(self):
# Repro for https://github.com/pytorch/pytorch/issues/93351
def fn(input):
v1 = torch.nn.functional.pad(input, pad=(1, 0))
return torch.gt(v1, input)
x = torch.rand([1, 2, 2, 1], dtype=torch.float64)
self.common(fn, (x,))
    def test_constant_pad_nd_inplace(self):
        # Zero padding is a no-op, but must still return a NEW tensor, not the
        # input itself.
        def fn(a):
            return aten.constant_pad_nd(a, [0, 0])
        x = torch.randn([2], device=self.device)
        fn_compiled = torch.compile(fn)
        y = fn_compiled(x)
        self.assertTrue(y is not x)
    def test_l1_loss(self):
        # l1_loss and mse_loss lowering (both reduce to a scalar by default).
        def fn(a, b):
            return torch.nn.functional.l1_loss(a, b), torch.nn.functional.mse_loss(a, b)
        self.common(
            fn,
            (
                torch.randn([2, 3, 16, 16]),
                torch.randn([2, 3, 16, 16]),
            ),
            check_lowp=False,
        )
def test_triu(self):
def fn(a):
return aten.triu(a, 1), aten.triu(a, 0), aten.triu(a, 2)
self.common(fn, (torch.randn([2, 10, 10]),))
    def test_no_op_reduction(self):
        # Reductions over size-1 dims (effectively no-ops).
        def fn(a):
            return a.sum(-1), torch.amax(a + 1, 1, keepdim=True)
        self.common(fn, (torch.randn([8, 1, 1]),))
    def test_inplace_add(self):
        # In-place add_ must mutate and return the SAME input tensor object.
        @torch._dynamo.optimize("inductor")
        def fn(x, y):
            return x.add_(y)
        inputs = (
            rand_strided((4, 4), (4, 1), device=self.device),
            rand_strided((4, 4), (4, 1), device=self.device),
        )
        inp_clone = inputs[0].clone()
        out = fn(*inputs)
        self.assertTrue(same(out, inp_clone + inputs[1]))
        self.assertTrue(out is inputs[0])
    # The following 2 tests are meant to check the logic that drops
    # xmask from triton load/store if xnumel = 1
    @requires_cuda()
    def test_single_elem(self):
        # Single-element tensor: xnumel == 1, so xmask should be dropped.
        def fn(a):
            b = a + 1
            return (b,)
        self.common(fn, (torch.randn(1),))
    @requires_cuda()
    def test_single_elem_indirect(self):
        # Same xnumel == 1 case, but through indirect (tensor) indexing.
        def fn(a, b):
            c = a[b] + 1
            return (c,)
        a = torch.randn(1)
        # NOTE(review): the trailing comma makes b a 1-tuple containing the
        # index tensor; a[(t,)] is equivalent to a[t], but confirm the tuple
        # wrapping is intentional rather than a stray comma.
        b = (torch.tensor([0], dtype=torch.int64),)
        self.common(fn, (a, b))
    # This test is meant to check for issues from the logic
    # that drops xmask from triton load/store if XBLOCK divides xnumel
    @requires_cuda()
    def test_xblock_divides_xnumel(self):
        def fn(a):
            b = a + 1
            return (b,)
        # assumption is that XBLOCK is always a divisor of 1024
        # so xmask will be dropped iff xnumel is multiple of 1024
        self.common(fn, (torch.randn(1024),))
        self.common(fn, (torch.randn(1025),))
    def test_inplace_mixed_dtype_ops(self):
        # Chained in-place ops on a float tensor with a double rhs; result must
        # match the equivalent eager chain.
        @torch._dynamo.optimize("inductor")
        def fn(x, y):
            z = x + y.float()
            w = z.add_(y)
            return w.mul_(y)
        inputs = (
            rand_strided((4, 4), (4, 1), device=self.device, dtype=torch.float),
            rand_strided((4, 4), (4, 1), device=self.device, dtype=torch.double),
        )
        out = fn(*inputs)
        out_eager = (inputs[0] + inputs[1].float()).add_(inputs[1]).mul_(inputs[1])
        self.assertTrue(same(out, out_eager))
    @config.patch(
        {"triton.unique_kernel_names": True, "triton.descriptive_names": False}
    )
    def test_kernel_names(self):
        # Smoke test that compilation succeeds with unique, non-descriptive
        # kernel naming enabled.
        @torch._dynamo.optimize("inductor")
        def fn(x):
            return 2 * x
        inputs = (rand_strided((8,), (1,), device=self.device),)
        self.assertTrue(same(fn(*inputs), 2 * inputs[0]))
    @config.patch({"triton.cudagraphs": True if not torch.version.hip else False})
    def test_strided_inputs(self):
        # Inputs with mismatched (non-contiguous) strides under cudagraphs.
        @torch._dynamo.optimize("inductor")
        def fn(x, y):
            return x + y
        inputs = (
            rand_strided((8, 16), (32, 2), device=self.device),
            rand_strided((8, 16), (16, 1), device=self.device),
        )
        self.assertTrue(same(fn(*inputs), inputs[0] + inputs[1]))
    @config.patch({"triton.cudagraphs": True if not torch.version.hip else False})
    def test_input_mutation1(self):
        # copy_ into an input: both the return value and the mutated argument
        # must match eager, across two separate invocations.
        def fn(a):
            b = a + 1
            a.copy_(b)
            c = a + 2
            return a * b / c
        arg1 = torch.randn(64, device=self.device)
        arg2 = arg1.clone()
        arg3 = torch.randn(64, device=self.device)
        arg4 = arg3.clone()
        correct1 = fn(arg1)
        correct2 = fn(arg3)
        opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
        actual1 = opt_fn(arg2)
        actual2 = opt_fn(arg4)
        self.assertTrue(same(actual1, correct1))
        self.assertTrue(same(actual2, correct2))
        self.assertTrue(same(arg1, arg2))
        self.assertTrue(same(arg3, arg4))
    def test_input_mutation2(self):
        # Mutation of an input through a view: a.view(64).copy_(...) must be
        # reflected in the original argument after the compiled call.
        def fn(a):
            b = a + 1
            a.view(64).copy_(torch.tensor([66.0], device=a.device))
            c = a + 2
            return b, c
        # NOTE: this test fails when none of the inputs require grad.
        # That seems like an inductor bug.
        arg1 = torch.randn([1, 64], device=self.device).requires_grad_(True).add(1)
        arg2 = arg1.clone()
        correct1 = fn(arg1)
        opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
        actual1 = opt_fn(arg2)
        self.assertTrue(same(actual1, correct1))
        self.assertTrue(same(arg1, arg2))
def test_input_mutation3(self):
def fn(a):
a += 1
a *= 2
aten.sigmoid_(a)
a = a.view(64)
a += 3
a *= 4
aten.relu_(a)
return a
arg1 = torch.randn([1, 64], device=self.device)
arg2 = arg1.clone()
correct1 = fn(arg1)
opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
actual1 = opt_fn(arg2)
self.assertTrue(same(actual1, correct1))
self.assertTrue(same(arg1, arg2))
def test_input_mutation4(self):
def fn(a):
torch.relu_(a)
return a
arg1 = torch.randn([1, 64], device=self.device)
arg2 = arg1.clone()
correct1 = fn(arg1)
opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
actual1 = opt_fn(arg2)
self.assertTrue(same(actual1, correct1))
self.assertTrue(same(arg1, arg2))
def test_slice_mutation1(self):
def fn(a):
x = torch.zeros_like(a)
b = x + 1
x[:, 3] = 3.0
c = torch.clone(x)
x[4, :] = 4.0
d = x + 1
return x, b, c, d
self.common(fn, (torch.randn([8, 8]),))
    def test_slice_mutation2(self):
        # In-place slice assignments, including overlapping source/destination
        # slices (a[:, 2:11] = a[:, 1:10] + 2), where read order matters.
        def fn(a):
            a[:, 20:40] = a[:, 20:40] + 1
            a[:, 2:11] = a[:, 1:10] + 2
        arg1 = torch.randn([1, 64], device=self.device)
        arg2 = arg1.clone()
        fn(arg1)
        opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
        opt_fn(arg2)
        # TODO, fix: See https://github.com/pytorch/pytorch/issues/94693
        if self.device != "cpu":
            self.assertTrue(same(arg1, arg2))
def test_indirect_load_broadcast(self):
def fn(in_ptr0, in_ptr1, in_ptr2):
return torch.gather(in_ptr1, 0, in_ptr2) + in_ptr0
arg190 = rand_strided((32, 21), (1, 32), device=self.device, dtype=torch.int64)
arg190.fill_(0)
arg111 = rand_strided(
(9521, 512), (512, 1), device=self.device, dtype=torch.float32
)
self.common(
fn,
(
torch.randn(32, 1),
arg111,
arg190,
),
)
@unittest.skipIf(not has_torchvision_roi_align(), "requires torchvision")
def test_roi_align(self):
def fn(a, b):
return torch.ops.torchvision.roi_align(a, b, 0.25, 7, 7, 2, False)
self.common(fn, (torch.zeros([4, 256, 296, 304]), torch.zeros([2292, 5])))
def test_nll_loss_forward(self):
def fn(a, b):
return aten.nll_loss_forward(a, b, None, 1, -100)
labels = (
torch.zeros([5], dtype=torch.int64),
torch.tensor([-100, -100, 3, -100, -100], dtype=torch.int64),
)
inps = (torch.randn(5, 5), torch.randn(5, 5))
for a, b in zip(inps, labels):
self.common(
fn,
(a, b),
)
def test_nll_loss_backward(self):
def fn(a, b, c):
return aten.nll_loss_backward(
a, b, c, None, 1, -100, torch.tensor(1.0, device=self.device)
)
labels = (
torch.zeros([5], dtype=torch.int64),
torch.tensor([-100, -100, 3, -100, -100], dtype=torch.int64),
)
inps = (torch.randn(5, 5), torch.randn(5, 5))
grad_outs = (torch.randn(()), torch.randn(()))
for a, b, c in zip(grad_outs, inps, labels):
self.common(
fn,
(a, b, c),
)
def test_isinf(self):
def fn(x):
return x.isinf(), x.isnan()
self.common(
fn, [torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")])]
)
self.common(
fn,
[
torch.tensor(
[1, float("inf"), 2, float("-inf"), float("nan")],
dtype=torch.float64,
)
],
)
def test_isinf2(self):
def fn(x):
y = torch.tensor(
[1, float("inf"), 2, float("-inf"), float("nan")], device=self.device
)
return x == y
self.common(
fn, (torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]),)
)
def test_any(self):
def fn(x):
return (
x.any(-1),
x.isinf().any(),
torch.all(x.isinf(), dim=0),
torch.all(torch.logical_not(x.isinf())),
)
self.common(fn, [-torch.rand(64)])
tmp = torch.randn(16, 8)
tmp[1, 1] = float("inf")
self.common(fn, [tmp])
def test_inplace_activations(self):
def fn(x):
a = aten.hardswish_(x + 1)
b = aten.hardtanh_(x + 1)
c = aten.leaky_relu_(x + 1)
d = aten.silu_(x + 1)
e = aten.log1p(x + 1)
f = aten.masked_fill_(x + 1, torch.zeros_like(x, dtype=torch.bool), 99.0)
h = aten.masked_fill_(x + 1, torch.ones_like(x, dtype=torch.bool), 99.0)
return (a, b, c, d, e, f, h)
self.common(fn, [torch.randn(64) * 10])
    def test_baddbmm(self):
        def fn(a, b, c, beta):
            return aten.baddbmm(a, b, c, beta=beta)
        b = torch.randn(6, 128, 64)
        c = torch.randn(6, 64, 100)
        # The NaN-filled `a` with beta=0.0 checks that the input bias is fully
        # ignored: per torch.baddbmm docs, nan/inf in `input` must not
        # propagate when beta == 0.
        options = itertools.product(
            [torch.randn(6, 1, 100), torch.randn(6, 1, 100).fill_(torch.nan)],
            [0.0, 1.0],
        )
        for a, beta in options:
            self.common(
                fn,
                [a, b, c, beta],
                # Mismatched elements: 1212 / 76800 (1.6%)
                # Greatest absolute difference: 0.001953125 at index (0, 0, 93) (up to 1e-05 allowed)
                # Greatest relative difference: 1.0 at index (3, 19, 4) (up to 0.001 allowed)
                atol=0.002,
                rtol=0.001,
            )
@config.patch({"triton.max_tiles": 2})
def test_fuse_tiled(self):
def fn(a, b, c):
return a + b, c + 1
self.common(
fn, [torch.randn(128, 1), torch.randn(1, 128), torch.randn(128, 128)]
)
def test_expand_as(self):
def fn(a, b):
return aten.expand_as(a, b), aten.expand_as(a + 1, b + 1) + 1
self.common(
fn,
[
torch.randn(6, 1, 100),
torch.randn(6, 128, 100),
],
)
def test_index_put1(self):
def fn(a, b, c):
return (
torch.index_put(a, [b], c),
torch.index_put_(a + 1, [b + 1], c + 1) + 1,
)
self.common(
fn,
[
torch.randn([800, 256, 7, 7]),
torch.randperm(601),
torch.randn([601, 256, 7, 7]),
],
)
self.common(
fn, [torch.randn(1024, 4, 2), torch.arange(4), torch.randn(4, 1, 1)]
)
def test_index_put2(self):
def fn(a, b, c):
return torch.index_put(a, [b], c, True)
self.common(
fn,
[
torch.randn([100, 256, 7, 7]),
torch.randint(0, 100, size=[600], dtype=torch.int64),
torch.randn([600, 256, 7, 7]),
],
# workaround for https://github.com/openai/triton/issues/558
check_lowp=False,
)
def test_index_put3(self):
def fn(a, b, c):
torch.ops.aten.index_put_(a, (None, b, None), c)
a1 = a + 1
torch.ops.aten.index_put_(a1, (None, b + 1, None), c + 1)
return (a, a1)
self.common(
fn,
[
torch.randn([1024, 4, 2]),
torch.arange(3),
torch.randn([1024, 1, 2]),
],
)
def test_index_put4(self):
# a, b[0] are not broadcastable
# https://github.com/pytorch/pytorch/issues/97104
def fn(a, b, c):
return torch.index_put(a, [b], c)
self.common(
fn,
[
torch.rand([8, 2]),
torch.rand([8]) > 0.5,
torch.rand([]),
],
)
def test_index_put_as_masked_fill(self):
def fn(a, b, c, d):
a = a.clone()
torch.ops.aten.index_put_(a, [b], c, d)
return a
self.common(
fn,
(
torch.randn([1024, 4, 2]),
torch.randn([1024, 4, 2]) > 0,
torch.randn([]),
False,
),
)
self.common(
fn,
(
torch.randn([1024, 4, 2]),
torch.randn([1024, 4, 2]) > 0,
torch.randn([]),
True,
),
)
def test_index_put_fallback1(self):
def fn(a, b, c, d):
a = a.clone()
torch.ops.aten.index_put_(a, [b], c, d)
return a
self.common(
fn,
(
torch.randn([3]),
torch.as_tensor([True, True, False]),
torch.randn([2]),
False,
),
)
self.common(
fn,
(
torch.randn([3]),
torch.as_tensor([True, True, False]),
torch.randn([2]),
True,
),
)
def test_index_put_fallback2(self):
def fn(a, b, c, d, e):
a = a.clone()
torch.ops.aten.index_put_(a, [None, b, c], d, e)
return a
self.common(
fn,
(
torch.randn([1, 2, 3]),
torch.as_tensor([0, 1]),
torch.as_tensor([True, True, False]),
torch.randn([]),
False,
),
)
self.common(
fn,
(
torch.randn([1, 2, 3]),
torch.as_tensor([0, 1]),
torch.as_tensor([True, True, False]),
torch.randn([]),
True,
),
)
    def test_index_put_deterministic_fallback(self):
        # accumulate=True index_put with repeated indices is nondeterministic
        # by default; under DeterministicGuard the deterministic fallback path
        # must be taken and still match eager.
        with DeterministicGuard(True):
            def fn(a, b, c):
                return torch.index_put(a, [b], c, True)
            self.common(
                fn,
                [
                    torch.randn([100, 32]),
                    torch.randint(0, 100, size=[600], dtype=torch.int64),
                    torch.randn([600, 32]),
                ],
                check_lowp=False,
            )
def test_index_put_index(self):
def fn(ind, x, src):
y = torch.ops.aten.index_put.default(x, [ind], src)
return torch.ops.aten.index.Tensor(y, [ind])
args = [torch.tensor([1], dtype=torch.int64), torch.randn(8, 4), torch.randn(4)]
self.common(fn, args)
@config.patch(fallback_random=True)
def test_bernoulli1(self):
def fn(a):
b = torch.empty_like(a)
return aten.bernoulli_(b), b
self.common(
fn,
[
torch.randn([100]),
],
)
def test_bernoulli2(self):
def fn(a):
return aten.bernoulli(a)
self.common(
fn,
[torch.tensor([1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0])],
)
def test_narrow(self):
def fn(x):
return (
aten.narrow(x, 1, 10, 16),
aten.narrow(x + 2, 0, 10, 16) + 1,
aten.narrow_copy(x, 1, 10, 16),
)
self.common(fn, [torch.randn(64, 64)])
def test_as_strided(self):
def fn(x):
return (
aten.as_strided(x, (8, 8, 64), (8 * 64, 64, 1), 0),
aten.as_strided(x + 1, (8, 8, 64), (8 * 64, 64, 1), 0) + 2,
)
def fn_channels_last(x):
return (
aten.as_strided(
x, (8, 384, 2, 20, 12), (153600, 1, 61440, 384, 7680), 0
),
aten.as_strided(
x + 1, (8, 384, 2, 20, 12), (153600, 1, 61440, 384, 7680), 0
)
+ 2,
)
self.common(fn, [torch.randn(64, 64)])
self.common(
fn_channels_last,
[torch.randn(8, 384, 20, 20).to(memory_format=torch.channels_last)],
)
def test_as_strided_scatter(self):
def fn(a, b):
return aten.as_strided_scatter(
a * 8 + 10,
b * 2 - 4,
size=(a.shape[0], a.shape[1] // 2),
stride=(a.shape[1], 2),
storage_offset=0,
)
self.common(fn, [torch.randn(10, 1024), torch.randn(10, 512)])
def test_select_scatter(self):
def fn(x, a, b):
return (
aten.select_scatter(x, a, 1, 0),
aten.select_scatter(x, b, 0, 1),
)
self.common(
fn,
[
torch.randn(8, 197, 38),
torch.randn(8, 38),
torch.randn(197, 38),
],
)
def test_slice_scatter(self):
def fn(x, a):
return (
aten.slice_scatter(x, a, 2, 10, -10),
aten.slice_scatter(x, a[:, :, :40], 2, 10, -10, 2),
)
self.common(
fn,
[
torch.randn(4, 8, 100),
torch.randn(4, 8, 80),
],
)
def test_slice_scatter2(self):
def fn(a, b):
return aten.slice_scatter(a, b, 0, 0, 9223372036854775807)
self.common(
fn,
[
torch.randn([8, 197, 384]),
torch.randn([8, 197, 384]),
],
)
def test_scatter1(self):
def fn(a, dim, index, b):
return aten.scatter(a, dim, index, b)
self.common(
fn,
[
torch.zeros(2, 3),
-1,
torch.tensor([[0]]),
torch.ones(2, 3),
],
)
def test_scatter2(self):
if self.device == "cuda":
raise unittest.SkipTest("unstable on sm86")
def fn(a, dim, index, b):
return aten.scatter.reduce(a, dim, index, b, reduce="add")
self.common(
fn,
[
torch.zeros(64, 512),
0,
torch.zeros((64, 512), dtype=torch.int64),
torch.ones(64, 512),
],
)
def test_scatter3(self):
def fn(a, dim, index, b):
return aten.scatter(a, dim, index, b, reduce="add")
self.common(
fn,
[
torch.randn(5, 29, 13),
2,
torch.tensor([[[3, 5, 7, 9]]]),
0.8, # src can be a scalar
],
# Mismatched elements: 1 / 1885 (0.1%)
# Greatest absolute difference: 0.00018310546875 at index (0, 0, 3) (up to 1e-05 allowed)
# Greatest relative difference: 0.0022371364653243847 at index (0, 0, 3) (up to 0.001 allowed)
atol=2e-4,
rtol=1e-3,
)
def test_scatter4(self):
def fn(x, ind, src):
return torch.scatter(x, 0, ind, src)
for deterministic in [False, True]:
with DeterministicGuard(deterministic):
self.common(
fn,
[
torch.randn(196, 992),
torch.randint(196, (1, 992)),
torch.randn(1, 992),
],
)
def test_scatter5(self):
def fn(a, dim, index, b, reduce):
a = a.clone()
a.scatter_(dim, index, b, reduce=reduce)
a1 = a + 1.0
a1.scatter_(dim, index, b, reduce=reduce)
return (a, a1)
for reduce in ["add", "multiply"]:
self.common(
fn,
[
torch.ones((4, 5)),
0,
torch.tensor([[1], [2], [3]], dtype=torch.int64),
torch.randn(4, 5),
reduce,
],
)
def test_scatter6(self):
def fn(a, dim, index, b):
return aten.scatter(a, dim, index, b)
for deterministic in [False, True]:
with DeterministicGuard(deterministic):
self.common(
fn,
[
torch.randn(5, 8, 13),
2,
torch.tensor([[[3, 5, 7, 9]]]),
0.8, # src can be a scalar
],
)
@unittest.skip("Flaky test, needs debugging")
def test_scatter_add1(self):
def fn(a, dim, index, b):
return aten.scatter_add(a, dim, index, b)
self.common(
fn,
[
torch.randn(2, 3),
0,
torch.tensor([[0]]),
torch.randn(2, 3),
],
)
def test_scatter_add2(self):
def fn(a, dim, index, b):
return aten.scatter_add(a, dim, index, b)
self.common(
fn,
[
torch.randn(2, 3),
0,
torch.tensor([[0, 0, 0], [1, 1, 1]]),
torch.randn(2, 3),
],
)
def test_scatter_add3(self):
def fn(a, dim, index, b):
return aten.scatter_add(a, dim, index, b)
for deterministic in [False, True]:
with DeterministicGuard(deterministic):
self.common(
fn,
[
torch.randn(5, 29, 13),
2,
torch.tensor([[[3, 5, 7, 9]]]),
torch.randn(1, 1, 10),
],
)
def test_scatter_reduce1(self):
def fn(a, dim, index, b):
return aten.scatter_reduce(a, dim, index, b, "sum")
self.common(
fn,
[
torch.randn(5, 29, 13),
2,
torch.tensor([[[3, 5, 7, 9]]]),
torch.randn(1, 1, 10),
],
)
def test_scatter_reduce2(self):
def fn(a, dim, index, b):
return aten.scatter_reduce(a, dim, index, b, "sum", include_self=False)
self.common(
fn,
[
torch.randn(2, 3),
0,
torch.zeros((2, 3), dtype=torch.int64),
torch.randn(2, 3),
],
)
def test_scatter_reduce3(self):
def fn(a, dim, index, b, reduce):
a = a.clone()
a.scatter_reduce_(dim, index, b, reduce=reduce)
a1 = a + 1.0
a1.scatter_reduce_(dim, index, b, reduce=reduce)
return (a, a1)
for reduce in ["sum", "prod"]:
self.common(
fn,
[
torch.ones((4, 5)),
0,
torch.tensor([[1], [2], [3]], dtype=torch.int64),
torch.randn(4, 5),
reduce,
],
)
# issue #1150
def test_dense_mask_index(self):
if self.device == "cpu":
raise unittest.SkipTest(
"https://github.com/pytorch/torchdynamo/issues/1697"
)
def fn(x, y):
y = torch.ops.aten.select.int(y, 0, 2)
z = x * y
return z.sum()
self.common(fn, [torch.randn(102400), torch.randn(3)])
def test_empty1(self):
def fn():
return torch.empty((1, 128, 128))
self.common(fn, [], assert_equal=False)
def test_empty2(self):
def fn():
return aten.empty((1, 128, 128))
self.common(fn, [], assert_equal=False)
def test_new_empty(self):
def fn(a):
return aten.new_empty(a, [1, 128, 128])
self.common(fn, [torch.randn(55)], assert_equal=False)
def test_empty_strided(self):
def fn():
return aten.empty_strided([1, 128, 128], [16384, 128, 1])
self.common(fn, [], assert_equal=False)
def test_new_empty_strided(self):
def fn(a):
return aten.new_empty_strided(a, [1, 128, 128], [16384, 128, 1])
self.common(fn, [torch.randn(55)], assert_equal=False)
def test_dropout_trivial_0(self):
def fn1(a):
return torch.nn.functional.dropout(a, 0.0, True) + a
self.common(fn1, [torch.randn(55)])
def test_dropout_trivial_1(self):
def fn2(a):
return torch.nn.functional.dropout(a, 1.0, True) + a
self.common(fn2, [torch.randn(55)])
    @config.patch({"triton.cudagraphs": True})
    def test_dropout(self):
        # Statistical check for p=0.5 dropout (default and explicit): roughly
        # half the elements survive, and survivors are rescaled so the mean
        # stays near 1.0.
        random.seed(1234)
        torch.manual_seed(1234)
        @torch._dynamo.optimize("inductor")
        def fn1(a):
            return torch.nn.functional.dropout(a)
        x = torch.ones(1000, device=self.device, dtype=torch.float32)
        result1 = fn1(x)
        self.assertTrue(400 < result1.nonzero().shape[0] < 600)
        self.assertTrue(0.9 < result1.mean().item() < 1.1)
        random.seed(1234)
        torch.manual_seed(1234)
        @torch._dynamo.optimize("inductor")
        def fn2(a):
            return torch.nn.functional.dropout(a, 0.5, True)
        result2 = fn2(x)
        self.assertTrue(400 < result2.nonzero().shape[0] < 600)
        self.assertTrue(0.9 < result2.mean().item() < 1.1)
def test_dropout_deterministic(self):
@torch._dynamo.optimize("inductor")
def fn(a):
return torch.nn.functional.dropout(a, 0.55, True)
for cg in [False, True] if not torch.version.hip else [False]:
with patch.object(config.triton, "cudagraphs", cg):
torch._dynamo.reset()
x = torch.ones(1024, device=self.device, dtype=torch.float32)
torch.manual_seed(1234)
a0 = fn(x).clone()
a1 = fn(x).clone()
a2 = fn(x).clone()
torch.manual_seed(1234)
b0 = fn(x).clone()
b1 = fn(x).clone()
b2 = fn(x).clone()
# same seed, same values
self.assertTrue(torch.allclose(a0, b0))
self.assertTrue(torch.allclose(a1, b1))
self.assertTrue(torch.allclose(a2, b2))
# different calls, different values
self.assertFalse(torch.allclose(a0, a1))
self.assertFalse(torch.allclose(a1, a2))
    def test_rand_like_deterministic(self):
        # rand_like must be reproducible under the same manual_seed, differ
        # across successive calls, the two samples within one call must
        # differ, and all values must lie in [0, 1).
        @torch._dynamo.optimize("inductor")
        def fn(a):
            return torch.rand_like(a), torch.rand_like(a)
        x = torch.ones(1024, device=self.device, dtype=torch.float32)
        torch.manual_seed(1234)
        a0 = fn(x)[0].clone()
        a1 = fn(x)[0].clone()
        a2 = fn(x)[0].clone()
        torch.manual_seed(1234)
        b0 = fn(x)[0].clone()
        b1 = fn(x)[0].clone()
        b2 = fn(x)[0].clone()
        # same seed, same values
        self.assertTrue(torch.allclose(a0, b0))
        self.assertTrue(torch.allclose(a1, b1))
        self.assertTrue(torch.allclose(a2, b2))
        # different calls, different values
        self.assertFalse(torch.allclose(a0, a1))
        self.assertFalse(torch.allclose(a1, a2))
        c, d = fn(x)
        self.assertFalse(torch.allclose(c, d))
        self.assertTrue((c >= 0).all())
        self.assertTrue((c < 1).all())
        self.assertTrue((d >= 0).all())
        self.assertTrue((d < 1).all())
    @patch.object(torch._functorch.config, "functionalize_rng_ops", True)
    def test_philox_rand(self):
        # With functionalized RNG ops, rand_like must still be reproducible
        # under manual_seed, and the philox RNG state offset must advance by
        # the expected amount per compiled call.
        if self.device == "cpu":
            raise unittest.SkipTest(
                "functionalization of rng ops supported only on CUDA"
            )
        @torch._dynamo.optimize("inductor")
        def fn(x):
            a = torch.rand_like(x) * x
            a = torch.rand_like(x) * a
            return a
        def check(x):
            torch.manual_seed(123)
            a = fn(x)
            torch.manual_seed(1234)
            b = fn(x)
            torch.manual_seed(123)
            c = fn(x)
            # same seed, same values
            self.assertTrue(torch.allclose(a, c))
            # different calls, different values
            self.assertFalse(torch.allclose(a, b))
        check(torch.ones(1024, device=self.device, dtype=torch.float32))
        self.assertEqual(torch.cuda._get_rng_state_offset(), 2048)
        # Check non-multiple of 4 numel
        # NOTE(review): the 3-element input advances the offset to 8 across
        # two rand_like calls — presumably offsets are rounded up per call;
        # confirm against the philox offset calculation.
        check(torch.ones(3, device=self.device, dtype=torch.float32))
        self.assertEqual(torch.cuda._get_rng_state_offset(), 8)
def test_randn_like_empty(self):
class Model(torch.nn.Module):
def __init__(
self,
):
super().__init__()
def forward(self, v1: torch.Tensor):
vx = v1.min(dim=1).values
v2 = torch.randn_like(vx)
return v2
model = Model()
x = torch.rand(10, 3, 0)
self.common(model, (x,))
    def test_randint(self):
        # randint/randint_like: output shapes, same-seed reproducibility, and
        # value ranges — including a high bound (2**50) beyond 32-bit range.
        @torch.compile(fullgraph=True)
        def fn(x):
            return (
                torch.randint(10, [1024], device=x.device),
                torch.randint(-4, 7, [1024], dtype=torch.int32, device=x.device),
                torch.randint_like(x, 2**50),
            )
        torch.manual_seed(12345)
        a0, b0, c0 = fn(torch.zeros([40, 40], device=self.device))
        self.assertEqual(a0.shape, [1024])
        self.assertEqual(b0.shape, [1024])
        self.assertEqual(c0.shape, [40, 40])
        torch.manual_seed(12345)
        a1, b1, c1 = fn(torch.zeros([40, 40], device=self.device))
        self.assertEqual(a0, a1)
        self.assertEqual(b0, b1)
        self.assertEqual(c0, c1)
        self.assertEqual(a0.min(), 0)
        self.assertEqual(a0.max(), 9)
        self.assertEqual(b0.min(), -4)
        self.assertEqual(b0.max(), 6)
        self.assertGreaterEqual(c0.min(), 0)
        self.assertGreater(c0.max(), 2**40)
        self.assertLess(c0.max(), 2**50)
@config.patch(fallback_random=True)
def test_like_rands(self):
def fn(x):
return torch.rand_like(x), torch.randn_like(x)
self.common(fn, [torch.zeros([20, 20])])
def test_like_rands2(self):
# rand_like with kwargs `device` of str type
d = self.device
assert isinstance(d, str)
@torch.compile
def fn(x):
return torch.rand_like(x, device=d)
x = torch.ones(10, device=self.device, dtype=torch.float32)
a0 = fn(x).clone()
a1 = fn(x).clone()
self.assertFalse(torch.allclose(a0, a1))
@requires_cuda()
def test_like_rands3(self):
# rand_like with `device` which is different from `x.device`
def test_like_rands_on_different_device(device1, device2):
@torch.compile
def fn(x, device):
return torch.rand_like(x, device=device)
x = torch.ones(10, device=device1, dtype=torch.float32)
return fn(x, device2).clone()
a0 = test_like_rands_on_different_device("cpu", "cuda")
a1 = test_like_rands_on_different_device("cuda", "cpu")
self.assertTrue(a0.device.type == "cuda")
self.assertTrue(a1.device.type == "cpu")
def test_max_pool2d_with_indices_backward(self):
def fn(a, b, c):
return aten.max_pool2d_with_indices_backward(
a, b, [2, 2], [2, 2], [0, 0], [1, 1], False, c
)
x = torch.randn([2, 4, 18, 14])
result, indices = aten.max_pool2d_with_indices(
x,
[2, 2],
[2, 2],
[0, 0],
[1, 1],
False,
)
self.common(
fn,
[
torch.randn_like(result),
x,
indices,
],
)
def test_max_pool2d_with_indices_backward2(self):
def fn(a, b, c):
return aten.max_pool2d_with_indices_backward(
a, b, [3, 3], [2, 2], [1, 1], [1, 1], True, c
)
x = torch.randn([2, 4, 40, 56])
result, indices = aten.max_pool2d_with_indices(
x,
[3, 3],
[2, 2],
[1, 1],
[1, 1],
True,
)
self.common(
fn,
[
torch.randn_like(result),
x,
indices,
],
)
# From https://github.com/pytorch/torchdynamo/issues/1200
def test_max_pool2d_with_indices_backward3(self):
def fn(a, b, c):
return aten.max_pool2d_with_indices_backward(
a, b, [1, 1], [2, 2], [0, 0], [1, 1], False, c
)
x = torch.randn([32, 256, 37, 38])
result, indices = aten.max_pool2d_with_indices(
x,
[1, 1],
[2, 2],
0,
1,
False,
)
self.common(
fn,
[
torch.randn_like(result),
x,
indices,
],
)
# From https://github.com/pytorch/torchdynamo/issues/1352
    def test_max_pool2d_with_indices_backward4(self):
        # 5x5 window: still small enough to stay in the codegen path, so
        # exactly one kernel must be generated (contrast with backward5,
        # where a larger window forces a fallback and zero kernels).
        def fn(a, b, c):
            return aten.max_pool2d_with_indices_backward(
                a, b, [5, 5], [1, 1], [2, 2], [1, 1], False, c
            )
        torch._inductor.metrics.generated_kernel_count = 0
        x = torch.randn([2, 64, 3, 4])
        result, indices = aten.max_pool2d_with_indices(
            x,
            [5, 5],
            [1, 1],
            2,
            1,
            False,
        )
        self.common(
            fn,
            [
                torch.randn_like(result),
                x,
                indices,
            ],
        )
        self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
def test_max_pool2d_with_indices_backward5(self):
# Window size is too big. Should fallback
def fn(a, b, c):
return aten.max_pool2d_with_indices_backward(
a, b, [13, 13], [1, 1], [2, 2], [1, 1], False, c
)
torch._inductor.metrics.generated_kernel_count = 0
x = torch.randn([2, 64, 20, 20])
result, indices = aten.max_pool2d_with_indices(
x,
[13, 13],
[1, 1],
2,
1,
False,
)
self.common(
fn,
[
torch.randn_like(result),
x,
indices,
],
)
self.assertEqual(torch._inductor.metrics.generated_kernel_count, 0)
# From https://github.com/pytorch/pytorch/issues/93384
def test_max_pool2d_with_indices_backward6(self):
# dilation is not 1. Should fallback
def fn(a, b, c):
return aten.max_pool2d_with_indices_backward(
a, b, [3, 2], [2, 1], [1, 1], [1, 2], False, c
)
torch._inductor.metrics.generated_kernel_count = 0
x = torch.randn([2, 2, 3, 6])
result, indices = aten.max_pool2d_with_indices(
x,
[3, 2],
[2, 1],
[1, 1],
[1, 2],
False,
)
self.common(
fn,
[
torch.randn_like(result),
x,
indices,
],
)
self.assertEqual(torch._inductor.metrics.generated_kernel_count, 0)
def test_issue102546(self):
def fn(x):
return x.mean(0)
self.common(fn, [torch.rand(())])
def test_avg_pool2d_backward(self):
def fn(a, b):
return aten.avg_pool2d_backward(
a,
b,
[2, 2],
[2, 2],
[0, 0],
True,
False,
None,
)
self.common(
fn,
[
torch.randn([2, 4, 7, 7]),
torch.randn([2, 4, 14, 14]),
],
)
@skipIfRocm
def test_avg_pool2d_backward2(self):
def fn(a, b):
return aten.avg_pool2d_backward(
a,
b,
[3, 3],
[1, 1],
[1, 1],
True,
False,
None,
)
self.common(
fn,
[
torch.randn([1, 1, 20, 15]),
torch.randn([1, 1, 20, 15]),
],
)
def test_avg_pool2d_backward3(self):
def fn(a, b):
return aten.avg_pool2d_backward(
a,
b,
[1, 1],
[2, 2],
[0, 0],
False,
False,
None,
)
torch._inductor.metrics.generated_kernel_count = 0
self.common(
fn,
[
torch.randn([1, 2016, 11, 11]),
torch.randn([1, 2016, 21, 21]),
],
)
self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
def test_avg_pool2d_backward4(self):
def fn(a, b):
return aten.avg_pool2d_backward(
a,
b,
[13, 13],
[1, 1],
[0, 0],
True,
False,
None,
)
torch._inductor.metrics.generated_kernel_count = 0
self.common(
fn,
[
torch.randn([1, 16, 12, 12]),
torch.randn([1, 16, 24, 24]),
],
check_lowp=False,
)
self.assertEqual(torch._inductor.metrics.generated_kernel_count, 0)
    @config.patch(search_autotune_cache=False)
    def test_mm_views(self):
        # mm on transposed/viewed inputs must produce correct results without
        # requiring contiguous layouts.
        def fn(a, b):
            return torch.mm(a.view(32, 32), b.view(32, 32))
        self.common(
            fn,
            (
                torch.randn([32, 32]).transpose(0, 1),
                torch.randn([1, 32, 32]).transpose(0, 1),
            ),
            check_lowp=False,
        )
        expected_kernel = 0
        # codegen mm kernel from template
        # NOTE(review): expected_kernel == 0 while the comment above mentions a
        # templated mm kernel — presumably template kernels don't count toward
        # generated_kernel_count; confirm.
        self.assertEqual(
            torch._inductor.metrics.generated_kernel_count, expected_kernel
        )
    @torch._dynamo.config.patch(dynamic_shapes=True)
    @torch._dynamo.config.patch(assume_static_by_default=False)
    def test_dtype_sympy_expr(self):
        # With fully dynamic shapes, a sliced+contiguous tensor through
        # forward and backward must not trip the cpp-wrapper-disable metric.
        torch._inductor.metrics.disable_cpp_wrapper = 0
        @torch._dynamo.optimize_assert("inductor")
        def fn(a):
            y = a[..., :-1, :].contiguous()
            return y
        result = fn(torch.randn([1, 2, 16, 4]).requires_grad_())
        result.sum().backward()
        expected_disable_cpp_wrapper = 0
        self.assertEqual(
            torch._inductor.metrics.disable_cpp_wrapper, expected_disable_cpp_wrapper
        )
    def test_dropout2(self):
        # End-to-end dropout through autograd: eval mode is identity, train
        # mode drops ~33% with forward and backward dropping the *same*
        # elements, and reseeding reproduces the exact same mask. Also checks
        # the RNG is sampled once in the forward (fw) and reused, not
        # resampled, in the backward (bw) on CUDA.
        n = 100000
        weight = torch.ones(
            n, device=self.device, dtype=torch.float32, requires_grad=True
        )
        ones = torch.ones(n, device=self.device, dtype=torch.float32)
        @torch._dynamo.optimize_assert("inductor")
        def run(x, train=True):
            return F.dropout(x * weight, 0.33, train)
        def check(r, g):
            rmean = r.mean().item()
            gmean = g.mean().item()
            rcount = len(r.nonzero())
            gcount = len(g.nonzero())
            # dropped elements should match
            self.assertTrue(same(r.nonzero(), g.nonzero()))
            self.assertEqual(rcount, gcount)
            # dropped should be close to 0.33
            self.assertGreater(rcount, 0.64 * n)
            self.assertGreater(0.68 * n, rcount)
            self.assertAlmostEqual(rmean, gmean)
            self.assertAlmostEqual(rmean, 1.0, places=2)
        r1 = run(ones, train=False)
        r1.sum().backward()
        g1 = weight.grad.clone()
        # eval mode should be all ones
        self.assertTrue(same(r1, torch.ones_like(r1)))
        self.assertTrue(same(g1, torch.ones_like(g1)))
        torch.manual_seed(1234)
        weight.grad.zero_()
        r2, (fw_code, bw_code) = run_fw_bw_and_get_code(lambda: run(ones))
        if self.device == "cuda":
            self.assertEqual(fw_code.count("tl.rand"), 1)
            self.assertEqual(bw_code.count("tl.rand"), 0)
        g2 = weight.grad.clone()
        check(r2, g2)
        torch.manual_seed(1234)
        weight.grad.zero_()
        r3 = run(ones)
        r3.sum().backward()
        g3 = weight.grad.clone()
        check(r3, g3)
        # second run is same result as first
        self.assertTrue(same(r2, r3))
        self.assertTrue(same(g2, g3))
@config.patch(search_autotune_cache=False)
def test_dropout3(self):
m = torch.nn.Sequential(
torch.nn.Linear(32, 32, bias=False),
torch.nn.Dropout(),
torch.nn.Linear(32, 32, bias=False),
torch.nn.Dropout(),
).to(self.device)
@torch._dynamo.optimize_assert("inductor")
def run(x):
return m(x)
torch._inductor.metrics.generated_kernel_count = 0
result, (fw_code, bw_code) = run_fw_bw_and_get_code(
lambda: run(torch.randn([8, 32], device=self.device))
)
if self.device == "cuda":
self.assertEqual(fw_code.count("tl.rand"), 1)
self.assertEqual(bw_code.count("tl.rand"), 0)
expected_kernel = 4
else:
expected_kernel = 6
self.assertEqual(
torch._inductor.metrics.generated_kernel_count, expected_kernel
)
def test_randint_kernel_count(self):
@torch._dynamo.optimize_assert("inductor")
def fn1():
random_tensor1 = torch.randint(10, [32], device=self.device)
random_tensor2 = torch.randint(10, [32], device=self.device)
random_tensor3 = torch.randint(10, [32], device=self.device)
return random_tensor1, random_tensor2, random_tensor3
_, source_codes = run_and_get_code(fn1)
if self.device == "cuda":
self.assertEqual(len(source_codes), 1)
self.assertEqual(source_codes[0].count("async_compile.triton"), 1)
def test_roll(self):
def fn(a):
return (
aten.roll(a, [-3, 10], [1, 2]),
aten.roll(a, [5]),
)
self.common(
fn,
[
torch.randn([2, 56, 56, 16]),
],
)
def test_argmax_min_int32(self):
# https://github.com/pytorch/pytorch/issues/94055
def fn(a, b):
c = a.argmax(3)
return torch.min(b, c)
a = torch.rand(3, 4, 2, 1).int()
b = torch.rand(2, 2, 1, 4, 1).int()
self.common(fn, (a, b))
def test_argmax_argmin1(self):
def fn(x):
return (aten.argmax(x), aten.argmin(x))
self.common(
fn,
[
torch.randn([8, 256, 256]),
],
)
def test_argmax_argmin2(self):
def fn(x):
return (
aten.argmax(x, 0),
aten.argmin(x, 0),
aten.argmax(x, 1),
aten.argmin(x, 1),
)
self.common(fn, (torch.randn([144, 144]),))
def test_argmax_argmin_with_duplicates(self):
def fn(x):
return (
aten.argmax(x, 0),
aten.argmin(x, 0),
aten.argmax(x, 1),
aten.argmin(x, 1),
)
# Unrolled reduction
t1 = torch.randint(2, size=(6, 6))
self.common(fn, (t1,))
# Persistent reduction
t1 = torch.randint(8, size=(32, 32))
self.common(fn, (t1,))
# Non-persistent reduction
t1 = torch.randint(8, size=(1028, 1028))
self.common(fn, (t1,))
    def test_argmax_argmin_with_nan(self):
        # Inputs seeded with NaN columns: compiled argmax/argmin must agree
        # with eager's NaN handling across unrolled, persistent, and
        # non-persistent reduction sizes.
        def fn(x):
            return (
                aten.argmax(x, 0),
                aten.argmin(x, 0),
                aten.argmax(x, 1),
                aten.argmin(x, 1),
            )
        if self.device == "cpu":
            raise unittest.SkipTest("broken on CPU")
        # Unrolled reduction
        t1 = torch.randn((6, 6))
        t1[:, 1] = float("nan")
        t1[:, 3] = float("nan")
        self.common(fn, (t1,))
        # Persistent reduction
        t1 = torch.randn((32, 32))
        t1[:, 4] = float("nan")
        t1[:, 8] = float("nan")
        self.common(fn, (t1,))
        # Non-persistent reduction
        t1 = torch.randn((1028, 1028))
        t1[:, 40] = float("nan")
        t1[:, 100] = float("nan")
        self.common(fn, (t1,))
def test_conv_backward(self):
def fn(rank4_inps, rank3_inps, rank5_inps):
out1 = aten.convolution_backward(
*rank4_inps,
[C],
[1, 1],
[0, 0],
[1, 1],
False,
[0, 0],
1,
[True, True, True],
)
out2 = aten.convolution_backward(
*rank4_inps,
[C],
[1, 1],
[0, 0],
[1, 1],
False,
[0, 0],
1,
[True, False, False],
)
out3 = aten.convolution_backward(
*rank3_inps,
[C],
[1],
[0],
[1],
False,
[0],
1,
[True, True, True],
)
out4 = aten.convolution_backward(
*rank5_inps,
[C],
[1, 1, 1],
[0, 0, 0],
[1, 1, 1],
False,
[0, 0, 0],
1,
[True, True, True],
)
return (out1, out2, out3, out4)
B = 3
C = 4
H = 5
grad_out = torch.randn(B, C, H - 2, H - 2, H - 2)
inp = torch.randn(B, C, H, H, H)
weight = torch.randn(C, C, 3, 3, 3)
def shrink_rank(x, rank):
res = x
while res.dim() > rank:
res = torch.select(res, -1, 0)
return res.contiguous()
rank4_inps = [shrink_rank(x, 4) for x in [grad_out, inp, weight]]
rank3_inps = [shrink_rank(x, 4) for x in [grad_out, inp, weight]]
rank5_inps = [shrink_rank(x, 5) for x in [grad_out, inp, weight]]
with torch.backends.cudnn.flags(enabled=True, allow_tf32=False):
self.common(
fn,
[rank4_inps, rank3_inps, rank5_inps],
)
@unittest.skip(
"""
FIXME: In the case of having equally max/min elements, our implementation returns
the last index instead of the first one
"""
)
def test_argmax_argmin3(self):
def fn(x):
return (
aten.argmax(x, 0),
aten.argmin(x, 0),
aten.argmax(x, -1),
aten.argmin(x, -1),
)
self.common(
fn,
[torch.randint(0, 5, [10, 10])],
)
def test_vdd_clamp(self):
def fn(x):
return torch.clamp_min(x, 3)
self.common(
fn,
[
torch.randn([16], requires_grad=True) * 10,
],
)
    def test_tmp_not_defined_issue1(self):
        # Regression test distilled from a real model graph (per the test
        # name, generated code once referenced an undefined `tmp` variable —
        # see test_tmp_not_defined_issue2 for a sibling case).
        def forward(
            primals_3,
            primals_4,
            add_tensor,
            convert_element_type_default,
            div_default,
            reciprocal_default,
        ):
            var_default = torch.ops.aten.var(
                convert_element_type_default, [2], correction=0
            )
            sub_tensor = torch.ops.aten.sub.Tensor(add_tensor, div_default)
            mul_tensor_1 = torch.ops.aten.mul.Tensor(sub_tensor, reciprocal_default)
            mul_tensor_2 = torch.ops.aten.mul.Tensor(mul_tensor_1, primals_3)
            add_tensor_2 = torch.ops.aten.add.Tensor(mul_tensor_2, primals_4)
            convert_element_type_default_1 = add_tensor_2.to(dtype=torch.float32)
            convert_element_type_default_2 = convert_element_type_default_1.to(
                dtype=torch.float32
            )
            var_default_1 = torch.ops.aten.var(
                convert_element_type_default_2, [2], correction=0
            )
            broadcast_in_dim_default_2 = var_default_1.reshape(1, 512, 1)
            sum_default_1 = convert_element_type_default_2.sum(2)
            add_tensor_3 = torch.ops.aten.add.Tensor(broadcast_in_dim_default_2, 1e-05)
            return (var_default, sum_default_1, add_tensor_3)
        inps = [
            (torch.Size([1024]), torch.float32),
            (torch.Size([1024]), torch.float32),
            (torch.Size([1, 512, 1024]), torch.float32),
            (torch.Size([1, 512, 1024]), torch.float32),
            (torch.Size([1, 512, 1]), torch.float32),
            (torch.Size([1, 512, 1]), torch.float32),
        ]
        inps = [torch.randn(shape, dtype=dtype) for (shape, dtype) in inps]
        self.common(forward, inps, atol=1e-05, rtol=2e-05)
    @unittest.skipIf(
        TEST_WITH_ASAN
        or os.environ.get("BUILD_ENVIRONMENT", "").startswith("parallelnative"),
        "TODO: debug this with asan",
    )
    def test_tmp_not_defined_issue2(self):
        # Second "tmp not defined" regression repro, with grad-requiring inputs.
        def forward(arg38_1, arg81_1, getitem_17, new_zeros_default_4):
            div_tensor_7 = torch.ops.aten.div.Tensor(getitem_17, arg81_1)
            mul_tensor_24 = torch.ops.aten.mul.Tensor(div_tensor_7, arg38_1)
            sum_default_7 = torch.ops.aten.sum.default(mul_tensor_24)
            return (new_zeros_default_4, sum_default_7)

        # TODO: Remove once https://github.com/pytorch/pytorch/issues/94017 is resolved
        dtype = torch.float64 if self.device == "cpu" else torch.float32
        args = [
            ((1, 88, 40, 40), (140800, 1600, 40, 1), dtype),
            ((), (), dtype),
            ((1, 88, 40, 40), (140800, 1600, 40, 1), dtype),
            ((3,), (1,), dtype),
        ]
        args = [
            rand_strided(shape, stride, dtype).requires_grad_(True).add(1)
            for shape, stride, dtype in args
        ]
        self.common(forward, args)
    def test_misaligned_address_issue1(self):
        # Regression repro: gather with an int64 index once produced a
        # misaligned-address failure in generated code.
        def forward(sub_tensor_1, unsqueeze_default):
            gather_default = torch.ops.aten.gather.default(
                sub_tensor_1, 1, unsqueeze_default
            )
            return gather_default

        args = [
            ((1, 1000), (1000, 1), torch.float32),
            ((1, 1), (1, 1), torch.int64),
        ]
        args = [rand_strided(shape, stride, dtype) for shape, stride, dtype in args]
        self.common(forward, args)
    def test_invalid_operand_issue1(self):
        # Regression repro: chained slice_scatter/select_scatter feeding an
        # embedding lookup once generated an invalid operand.  The large
        # constant is the int64 "slice to the end" sentinel (2**63 - 1).
        def forward(arg0_1, arg1_1, arg3_1, squeeze, view_1, slice_1):
            slice_scatter = torch.ops.aten.slice_scatter.default(
                slice_1, arg3_1, 1, 1, 9223372036854775807
            )
            slice_scatter_1 = torch.ops.aten.slice_scatter.default(
                arg1_1, slice_scatter, 0, 0, 9223372036854775807
            )
            slice_2 = torch.ops.aten.slice.Tensor(
                slice_scatter_1, 0, 0, 9223372036854775807
            )
            select_scatter = torch.ops.aten.select_scatter.default(
                slice_2, squeeze, 1, 0
            )
            slice_scatter_2 = torch.ops.aten.slice_scatter.default(
                slice_scatter_1, select_scatter, 0, 0, 9223372036854775807
            )
            view = torch.ops.aten.view.default(slice_scatter_2, [-1, 128])
            embedding = torch.ops.aten.embedding.default(arg0_1, view, 1)
            return [embedding, view_1]

        args = [
            ((50005, 768), (768, 1), torch.float32),
            ((8, 128), (128, 1), torch.int64),
            ((8, 127), (127, 1), torch.int64),
            ((8,), (1,), torch.int64),
            ((1024,), (1,), torch.int64),
            ((8, 128), (128, 1), torch.int64),
        ]
        args = [rand_strided(shape, stride, dtype) for shape, stride, dtype in args]
        self.common(forward, args)
    def test_sizehint_issue1(self):
        # Regression repro: nn.functional.unfold with grad-requiring input
        # exercised a size-hint bug in lowering.
        def forward(x):
            return torch.nn.functional.unfold(
                x, kernel_size=[4, 4], dilation=1, padding=0, stride=[4, 4]
            )

        args = [((2, 24, 56, 56), (75264, 3136, 56, 1), torch.float32, False)]
        args = [
            rand_strided(sh, st, dt).requires_grad_(rg) for (sh, st, dt, rg) in args
        ]
        self.common(forward, args)
def test_zero_dim_reductions(self):
for kd in [True, False]:
inps0 = (torch.zeros(2, 0, device=self.device, dtype=torch.float16), 1, kd)
failed_ops = [aten.argmin, aten.argmax, aten.max, aten.min]
for fo in failed_ops:
with self.assertRaisesRegex(
IndexError, "Expected reduction dim 1 to have non-zero size"
):
mod = make_fx(fo)(*inps0)
_ = compile_fx_inner(mod, inps0)
pass_ops = [
lambda *x: fn(*x) for fn in [aten.sum, aten.prod, aten.any, aten.all]
]
for po in pass_ops:
compiled = torch._dynamo.optimize("inductor")(po)
expected = po(*inps0)
actual = compiled(*inps0)
self.assertTrue(torch.allclose(actual, expected, atol=1e-3, rtol=1e-3))
    def test_lerp(self):
        # Verify lerp lowering preserves both values and output strides for
        # contiguous and non-contiguous inputs.
        # non-contiguous inputs for lerp
        def fn0(i0, i1):
            x1 = i0.transpose(-2, -3)
            return torch.lerp(i1, x1, 70000)

        # contiguous inputs for lerp
        def fn1(i0, i1):
            return torch.lerp(i1, i0, 70000)

        def compare(fn, inputs):
            compiled = torch._dynamo.optimize("inductor")(fn)
            expected = fn(*inputs)
            actual = compiled(*inputs)
            self.assertEqual(expected, actual)
            # stride equality matters here, not just values
            self.assertEqual(expected.stride(), actual.stride())

        compare(fn0, [torch.rand(10, 3, 10), torch.rand(3, 10, 10)])
        compare(fn1, [torch.rand(3, 10, 10), torch.rand(3, 10, 10)])
    def test_unspec_inputs(self):
        # Mix a CUDA tensor with a 0-dim CPU scalar tensor of various dtypes,
        # in both argument orders; CUDA-only (the CPU backend segfaults here).
        if self.device == "cpu":
            raise unittest.SkipTest("segfault with CPU backend")

        def fn(x, y):
            return x + y, x * y, x / y

        opt = torch._dynamo.optimize("inductor")(fn)
        dtypes = [
            torch.float16,
            torch.bfloat16,
            torch.float32,
            torch.float64,
            torch.int32,
            torch.int64,
        ]
        for d in dtypes:
            inputs = (
                rand_strided((2, 3), (3, 1), dtype=torch.float32, device="cuda"),
                rand_strided((), (), dtype=d, device="cpu"),
            )
            self.assertTrue(same(opt(*inputs), fn(*inputs)))
            inputs = (inputs[1], inputs[0])
            self.assertTrue(same(opt(*inputs), fn(*inputs)))
    def test_list_clearing(self):
        # Verify that compiled wrappers clear the input list so input tensors
        # can be deallocated before the matmul runs; checked via weakrefs and a
        # TorchDispatchMode that intercepts aten.mm.out.
        if self.device == "cpu":
            contexts = [contextlib.nullcontext]
        else:
            contexts = [
                contextlib.nullcontext,
                lambda: config.patch(
                    {"triton.cudagraphs": True if not torch.version.hip else False}
                ),
            ]
        for context in contexts:
            with context():
                inps = [
                    torch.rand([5, 5]).to(self.device),
                    torch.rand([5, 5]).to(self.device),
                ]
                inp_refs = [weakref.ref(inp) for inp in inps]

                def fn(x, y):
                    a = x + y
                    return (a @ a,)

                fn_fx = make_fx(fn)(inps[0], inps[1])
                fn_compiled = compile_fx_inner(fn_fx, inps)
                test_self = self
                matmul_seen = False

                class TestRefMode(TorchDispatchMode):
                    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                        kwargs = kwargs if kwargs else {}
                        nonlocal inps
                        nonlocal inp_refs
                        nonlocal test_self
                        nonlocal matmul_seen
                        # by matmul, inputs should be deallocated
                        if func is aten.mm.out:
                            matmul_seen = True
                            test_self.assertEqual(len(inps), 0)
                            test_self.assertIsNone(inp_refs[0]())
                            test_self.assertIsNone(inp_refs[1]())
                        return func(*args, **kwargs)

                with TestRefMode():
                    fn_compiled(inps)
                # do an extra run to make sure we are deallocating on warmup and record
                if self.device == "cuda":
                    inps.extend(
                        [
                            torch.rand([5, 5]).to(self.device),
                            torch.rand([5, 5]).to(self.device),
                        ]
                    )
                    inp_refs.extend([weakref.ref(inp) for inp in inps])
                    matmul_seen = False
                    with TestRefMode():
                        fn_compiled(inps)
                # for some reason, TorchDispatch doesnt capture the
                # cuda mm call (even without cudagraphs)
                if self.device == "cpu":
                    self.assertTrue(matmul_seen)
                else:
                    self.assertEqual(len(inps), 0)
    def test_dtype_mismatch_issue(self):
        # Regression repro: pad followed by softmax triggered a dtype mismatch.
        def fn(x):
            attn = torch.nn.functional.pad(x, [0, 1])
            return attn.softmax(dim=-1)

        x = torch.rand(128, 32, 63)
        res_ref = fn(x)
        res = torch._dynamo.optimize("inductor")(fn)(x)
        self.assertEqual(res, res_ref)
    def test_kwargs(self):
        # Verify keyword arguments survive compilation (histogramdd is CPU-only).
        if self.device == "cuda":
            raise unittest.SkipTest("histogramdd only supports cpu")

        def fn(x, y):
            return torch.histogramdd(
                x,
                bins=[3, 3],
                weight=y,
            )

        self.common(
            fn,
            [torch.randn((4, 2)), torch.randn((4))],
        )
    @requires_cuda()
    @skipIfRocm
    @torch._inductor.config.patch("shape_padding", True)
    def test_shape_padding(self):
        # With shape_padding enabled, mm/bmm/addmm/baddbmm on deliberately
        # non-multiple-of-8 shapes (7/11/13/15) must still produce correct
        # results in fp16 and fp32.
        if torch._dynamo.config.dynamic_shapes:
            raise unittest.SkipTest("dynamic shapes do not support padding")

        dtypes = [
            torch.float16,
            torch.float32,
        ]
        b, m, n, k = 7, 11, 13, 15

        def gen(*shape, dtype=torch.float32):
            # divide by k and shift so values stay O(1) and products don't blow up
            return torch.randn(*shape, device="cuda", dtype=dtype) / k + 1.0

        for dtype in dtypes:
            x = gen(m, k, dtype=dtype)
            y = gen(k, n, dtype=dtype)
            z = gen(n, dtype=dtype)
            self.common(lambda x, y: torch.mm(x, y), (x, y))
            self.common(lambda x, y: torch.matmul(x, y), (x, y))
            self.common(lambda x, y, z: torch.addmm(z, x, y), (x, y, z))

        for dtype in dtypes:
            x = gen(b, m, k, dtype=dtype)
            y = gen(b, k, n, dtype=dtype)
            z = gen(n, dtype=dtype)
            self.common(lambda x, y: torch.bmm(x, y), (x, y))
            self.common(lambda x, y: torch.matmul(x, y), (x, y))
            self.common(lambda x, y, z: torch.baddbmm(z, x, y), (x, y, z))
    @torch._dynamo.config.patch(dynamic_shapes=True)
    def test_int_input_dynamic_shapes(self):
        # A plain Python int argument must be treated as dynamic input,
        # not baked in as a constant.
        @torch.compile(dynamic=True)
        def fn(x, i):
            y = x * i
            return y

        # Constant must not get matched as constant
        self.common(fn, [torch.randn(3, 1, 1, 1, 1), 9132])
@torch._dynamo.config.patch(dynamic_shapes=True)
def test_sqrt_dynamic_shapes(self):
# TIMM convit_base model: https://github.com/pytorch/pytorch/issues/97877.
# TODO: support cuda path.
if self.device == "cuda":
raise unittest.SkipTest("sqrt dynamic shapes only supports cpu")
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x):
B, N, C = x.shape
return self.get_rel_indices(N)
def get_rel_indices(self, num_patches: int) -> torch.Tensor:
img_size = int(num_patches**0.5)
ind = torch.arange(img_size)
return ind
self.common(
Model(),
[
torch.randn(8, 4, 4),
],
)
    @torch._dynamo.config.patch(dynamic_shapes=True)
    def test_index_dynamic_shapes(self):
        # Bilinear-resize-style advanced indexing with sizes derived from
        # dynamic dims via math.ceil/math.floor; repro from vision_maskrcnn.
        if self.device == "cuda":
            raise unittest.SkipTest("index dynamic shapes only supports cpu")

        # Repro from vision_maskrcnn
        def fn(arg0_1):
            unsqueeze = arg0_1.unsqueeze(0)
            sym_size = arg0_1.size(1)
            # output row count: input rows scaled by ~1.87, rounded up
            ceil = math.ceil(sym_size * 1.8735363483428955)
            iota = torch.ops.prims.iota.default(
                ceil,
                start=0,
                step=1,
                dtype=torch.int64,
                device="cpu",
                requires_grad=False,
            )
            convert_element_type_1 = iota.to(torch.float32)
            sym_size_1 = arg0_1.size(2)
            floor_1 = math.floor(sym_size_1 * 1.8735363483428955)
            ceil_1 = math.ceil(floor_1)
            iota_1 = torch.ops.prims.iota.default(
                ceil_1,
                start=0,
                step=1,
                dtype=torch.int64,
                device="cpu",
                requires_grad=False,
            )
            convert_element_type_3 = iota_1.to(torch.float32)
            # map output coordinates back to (fractional) input coordinates
            sub_2 = (convert_element_type_1 + 0.5) * (sym_size / ceil) - 0.5
            clamp_min = sub_2.clamp_min(0.0)
            sub_3 = (convert_element_type_3 + 0.5) * (sym_size_1 / floor_1) - 0.5
            clamp_min_1 = sub_3.clamp_min(0.0)
            convert_element_type_4 = clamp_min.to(torch.int64)
            sub_4 = sym_size - 1
            clamp_max = clamp_min.ceil().clamp_max(sub_4)
            convert_element_type_5 = clamp_max.to(torch.int64)
            convert_element_type_6 = clamp_min_1.to(torch.int64)
            unsqueeze_2 = convert_element_type_4.unsqueeze(1)
            # gather the two neighboring rows, then blend by fractional offset
            index = torch.ops.aten.index.Tensor(
                unsqueeze, [None, None, unsqueeze_2, convert_element_type_6]
            )
            index_1 = torch.ops.aten.index.Tensor(
                unsqueeze,
                [
                    None,
                    None,
                    convert_element_type_5.unsqueeze(1),
                    convert_element_type_6,
                ],
            )
            sub_6 = clamp_min.unsqueeze(1) - unsqueeze_2
            mul_10 = (index * (1.0 - sub_6) + index_1 * (sub_6)) * (
                1.0 - (clamp_min_1 - convert_element_type_6)
            )
            select = torch.ops.aten.select.int(mul_10, 0, 0)
            return (select,)

        x = torch.randn(15, 20, 3)
        self.common(
            fn,
            [x],
        )
@config.patch(profiler_mark_wrapper_call=True)
def test_profiler_mark_wrapper_call(self):
from torch.profiler import profile
@torch._dynamo.optimize("inductor", nopython=True)
def fn(a, b):
return a + b
a = torch.rand((100,))
b = torch.rand((100,))
with profile() as prof:
fn(a, b)
assert any(
"inductor_wrapper_call" in e.name for e in prof.profiler.function_events
)
    @unittest.skipIf(IS_X86 and not HAS_AVX2, "Requires AVX2")
    def test_pixel_shuffle_channels_last(self):
        # pixel_shuffle + relu on a channels_last input; requires AVX2 on x86.
        def fn(x):
            x = torch.nn.functional.pixel_shuffle(x, 2)
            x = torch.nn.functional.relu(x)
            return x

        self.common(
            fn,
            (torch.randn(1, 16, 64, 72).to(memory_format=torch.channels_last),),
        )
    def test_where_broadcast(self):
        # Broadcasting torch.where repros for two reported issues (#93374 and
        # #94725, the latter involving lift_fresh_copy of a buffered constant).
        # https://github.com/pytorch/pytorch/issues/93374
        def fn(x, p1, p0):
            o = torch.where(x, p1, p0)
            return o

        # https://github.com/pytorch/pytorch/issues/94725
        class Repro(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer(
                    "_tensor_constant0", torch.randn([], dtype=torch.float32)
                )

            def forward(self, arg0_1, arg1_1):
                convert_element_type = torch.ops.prims.convert_element_type.default(
                    arg1_1, torch.bool
                )
                bitwise_not = torch.ops.aten.bitwise_not.default(convert_element_type)
                _tensor_constant0 = self._tensor_constant0
                lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(
                    _tensor_constant0
                )
                where = torch.ops.aten.where.self(bitwise_not, lift_fresh_copy, arg0_1)
                return (where, bitwise_not)

        self.common(
            fn,
            (torch.tensor([[True]]), torch.rand(13, 7, 3), torch.rand(1, 1)),
        )

        # Second repro only runs with static shapes; compares compiled output
        # against eager on a half-masked uint8 condition.
        if not torch._dynamo.config.dynamic_shapes:
            args = [
                torch.randn(1, 4, 64, 64),
                torch.zeros(1, 1, 64, 64, dtype=torch.uint8),
            ]
            args[1][:, :, :32, :32] = 1
            eager_args = [x.clone() for x in args]
            eager_mod = Repro()
            mod = make_fx(eager_mod, tracing_mode="real")(*args)
            compiled = compile_fx_inner(mod, args)
            inductor_out = compiled(args)
            eager_out = eager_mod(*eager_args)
            self.assertEqual(inductor_out, eager_out)
def test_where_with_logical_op(self):
def fn_and(x, y):
return torch.where(torch.logical_and(x, y), 1.0, 0.0)
def fn_or(x, y):
return torch.where(torch.logical_or(x, y), 1.0, 0.0)
self.common(
fn_and,
(torch.randn(32), torch.randn(32)),
)
self.common(
fn_or,
(torch.randn(32), torch.randn(32)),
)
    def test_inplace_where_pointwise(self):
        # In-place scalar write followed by a broadcasting multiply.
        # https://github.com/pytorch/pytorch/issues/96446
        def fn(a, b):
            a[0] = 2
            return a * b

        self.common(fn, (torch.rand(1), torch.rand(2)))
    def test_view_on_aliased(self):
        # Views on aliased tensors with a discarded comparison expression.
        # https://github.com/pytorch/pytorch/issues/96728
        def fn1(a, b):
            a = a.max(0).values
            c = torch.cat((a, b))
            c = c.round()
            b >= a[0]  # noqa: B015
            return c

        some_const = torch.tensor(6324)

        def fn2():
            a = torch.tensor([[0.6324]])
            ret = torch.cat((a, a), dim=0)
            some_const >= a[0]  # noqa: B015
            return ret

        self.common(fn1, (torch.tensor([[4.0]]), torch.tensor([5.0])))
        self.common(fn2, ())
    def test_argmax_to_float(self):
        # argmax result cast to float then reduced.
        # https://github.com/pytorch/pytorch/issues/97127
        def fn():
            a = torch.zeros([2, 2])
            b = a.argmax(0)
            return b.float().mean()

        self.common(fn, ())
    def test_const_int32_to_float(self):
        # Constant int32 tensor arithmetic followed by a float cast.
        # https://github.com/pytorch/pytorch/issues/97124
        def fn():
            a = torch.zeros([1, 2], dtype=torch.int32)
            a = a + a
            b = a.to(dtype=torch.float32)
            return b * 0.8

        self.common(fn, ())
def test_getitem(self):
out_features = ["p3", "p4", "p5", "p6", "p7"]
in_feature = "p5"
def fn(a):
return a[out_features.index(in_feature)]
for dynamic_shapes in [True, False]:
with torch._dynamo.config.patch(dynamic_shapes=dynamic_shapes):
torch._dynamo.reset()
x = [
torch.rand([1, 256, 100, 152]),
torch.rand([1, 256, 50, 76]),
torch.rand([1, 256, 25, 38]),
]
opt_fn = torch._dynamo.optimize("inductor")(fn)
same(fn(x), opt_fn(x))
def test_pad_view(self):
def fn(a):
y = torch.nn.functional.pad(a, (0, 0, 0, 1))
y = y.view(*y.size()[:-2], y.size(-1), y.size(-2))
return y
for dynamic_shapes in [True, False]:
with torch._dynamo.config.patch(dynamic_shapes=dynamic_shapes):
torch._dynamo.reset()
x = torch.rand(48, 3, 512, 512)
opt_fn = torch._dynamo.optimize("inductor")(fn)
same(fn(x), opt_fn(x))
def test_data_type_propogation(self):
_graph: torch.fx.Graph = torch.fx.Graph()
ops: torch.fx.Node = _graph.create_node("placeholder", "ops")
get_index: torch.fx.Node = _graph.create_node(
"call_module", "get_index", args=("index0",)
)
c1: torch.fx.Node = _graph.create_node(
"call_method",
"constant",
args=(
ops,
get_index,
torch.bfloat16,
),
)
c2: torch.fx.Node = _graph.create_node(
"call_method",
"constant",
args=(
ops,
get_index,
torch.float,
),
)
add: torch.fx.Node = _graph.create_node(
"call_method",
"add",
args=(
ops,
c1,
c2,
),
)
eq: torch.fx.Node = _graph.create_node(
"call_method",
"eq",
args=(
ops,
add,
add,
),
)
argmin: torch.fx.Node = _graph.create_node(
"call_method",
"reduction",
args=(
ops,
"buf",
torch.int64,
torch.int64,
"argmin",
get_index,
add,
),
)
any: torch.fx.Node = _graph.create_node(
"call_method",
"reduction",
args=(
ops,
"buf",
torch.bool,
torch.bool,
"any",
get_index,
add,
),
)
bitwise_not: torch.fx.Node = _graph.create_node(
"call_method",
"bitwise_not",
args=(
ops,
argmin,
),
)
bitwise_or: torch.fx.Node = _graph.create_node(
"call_method",
"bitwise_or",
args=(
ops,
eq,
any,
),
)
bitwise_left_shift: torch.fx.Node = _graph.create_node(
"call_method",
"bitwise_left_shift",
args=(
ops,
argmin,
bitwise_not,
),
)
DataTypePropagation.propagate_graph(_graph)
def get_data_type(node: torch.fx.Node):
if OptimizationContext.key in node.meta:
return node.meta[OptimizationContext.key].dtype
else:
return None
self.assertEqual(get_data_type(ops), None)
self.assertEqual(get_data_type(c1), torch.bfloat16)
self.assertEqual(get_data_type(c2), torch.float)
self.assertEqual(get_data_type(add), torch.float)
self.assertEqual(get_data_type(eq), torch.bool)
self.assertEqual(get_data_type(argmin), torch.int64)
self.assertEqual(get_data_type(any), torch.bool)
self.assertEqual(get_data_type(bitwise_not), torch.int64)
self.assertEqual(get_data_type(bitwise_or), torch.bool)
self.assertEqual(get_data_type(bitwise_left_shift), torch.int64)
def test_AllenaiLongformerBase_repro(self):
def fn(query, scores, window_overlap):
batch_size, seq_len, num_heads, _ = query.size()
chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1
diagonal_attention_scores = scores.new_zeros(
(
batch_size * num_heads,
chunks_count + 1,
window_overlap,
window_overlap * 2 + 1,
)
)
diagonal_attention_scores[:, :-1, :, window_overlap:] = scores[
:, :, :window_overlap, : window_overlap + 1
]
input_tensor = diagonal_attention_scores.view(
batch_size, num_heads, seq_len, 2 * window_overlap + 1
).transpose(2, 1)
beginning_input = input_tensor[:, :window_overlap, :, : window_overlap + 1]
input_tensor[:, :window_overlap, :, : window_overlap + 1] = torch.full_like(
beginning_input, -float("inf")
)
return input_tensor
for dynamic_shapes in [True, False]:
with torch._dynamo.config.patch(dynamic_shapes=dynamic_shapes):
torch._dynamo.reset()
args = [
((4, 1024, 12, 64), (768, 3072, 64, 1), torch.float32, "cpu"),
((48, 3, 512, 513), (787968, 262656, 513, 1), torch.float32, "cpu"),
]
args = [rand_strided(sh, st, dt, dev) for (sh, st, dt, dev) in args]
opt_fn = torch._dynamo.optimize("inductor")(fn)
same(fn(*args, 256), opt_fn(*args, 256))
    def test_cumsum_pattern_matcher_issue(self):
        # cumsum over a freshly-created attention mask; compared bitwise
        # (atol=0, rtol=0) since integer cumsum must be exact.
        def fn(input_ids) -> torch.Tensor:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size, seq_length = input_shape
            past_key_values_length = 0
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length)
            attention_mask = attention_mask.long()
            return torch.cumsum(attention_mask, dim=1)

        for dynamic_shapes in [True, False]:
            with torch._dynamo.config.patch(dynamic_shapes=dynamic_shapes):
                torch._dynamo.reset()
                x = torch.randn(2, 2)
                opt = torch._dynamo.optimize("inductor")(fn)
                res = opt(x)
                ref = fn(x)
                self.assertEqual(res, ref, atol=0, rtol=0)
def test_slice(self):
def fn(a, b):
return torch.ops.aten.slice.Tensor(a, 0, 0, -b)
for dynamic_shapes in [True, False]:
with torch._dynamo.config.patch(dynamic_shapes=dynamic_shapes):
torch._dynamo.reset()
x = torch.rand(48, 3, 512, 512)
opt_fn = torch._dynamo.optimize("inductor")(fn)
same(fn(x, 2), opt_fn(x, 2))
    def test_inplace_resize_as(self):
        # resize_as_ grows the tensor, leaving new elements uninitialized, so
        # the `same(...)` result is deliberately discarded here: the test only
        # checks that compilation and execution don't crash, not the values.
        def fn(x, y):
            x.resize_as_(y)
            return x

        x = torch.randn(2, 3)
        y = torch.randn(200, 300)
        x_clone = x.clone()
        opt_fn = torch._dynamo.optimize("inductor")(fn)
        same(fn(x, y), opt_fn(x_clone, y))
def test_erfc(self):
def fn(x):
return torch.erfc(x)
self.common(fn, (torch.randn(8, 8),))
def test_erfinv(self):
def fn(x):
return torch.erfinv(x)
# domain for erfinv is (-1, 1)
x = torch.empty(8, 8).uniform_(-1, 1)
self.common(fn, (x,))
    def test_uint(self):
        # uint8 negation wraps (neg(5) == 251), so x < y must hold; checks
        # unsigned semantics survive lowering.
        def fn(z):
            x = torch.tensor(5, device=z.device, dtype=torch.uint8)
            y = torch.neg(x)
            return x < y

        self.common(fn, (torch.randn(26),))
    @skipIfRocm
    def test_scaled_dot_product_efficient_attention(self):
        # Memory-efficient SDPA fallback; CUDA-only kernel.
        if self.device == "cpu":
            raise unittest.SkipTest("requires CUDA")

        def fn(q, k, v, compute_log_sumexp):
            return aten._scaled_dot_product_efficient_attention(
                q, k, v, compute_log_sumexp
            )

        self.common(
            fn,
            (
                torch.randn(4, 4, 36, 36),
                torch.randn(4, 4, 36, 36),
                torch.randn(4, 4, 36, 36),
                False,
            ),
            # low-precision comparison disabled: attention is too sensitive
            check_lowp=False,
        )
@dataclasses.dataclass
class TestFailure:
    """Describes a known-failing test for `copy_tests`.

    suffixes: device suffixes ("cpu", "cuda", ...) the failure applies to.
    is_skip: skip the test entirely instead of marking it expectedFailure.
    """

    suffixes: Tuple[str]
    is_skip: bool = False
    # Tell pytest this dataclass is not itself a test class.
    __test__: bool = False
def copy_tests(my_cls, other_cls, suffix, test_failures=None): # noqa: B902
for name, value in my_cls.__dict__.items():
if name.startswith("test_"):
# You cannot copy functions in Python, so we use closures here to
# create objects with different ids. Otherwise, unittest.skip
# would modify all methods sharing the same object id. Also, by
# using a default argument, we create a copy instead of a
# reference. Otherwise, we would lose access to the value.
@functools.wraps(value)
def new_test(self, value=value):
return value(self)
# Copy __dict__ which may contain test metadata
new_test.__dict__ = copy.deepcopy(value.__dict__)
tf = test_failures and test_failures.get(name)
if tf is not None and suffix in tf.suffixes:
skip_func = (
unittest.skip("Skipped!")
if tf.is_skip
else unittest.expectedFailure
)
new_test = skip_func(new_test)
setattr(other_cls, f"{name}_{suffix}", new_test)
# Instantiate the CPU variants of the shared templates (skipped on MPS-only
# machines, where the "cpu" sweep is not meaningful).
if HAS_CPU and not torch.backends.mps.is_available():

    class SweepInputsCpuTest(SweepInputs2, TestCase):
        gen = InputGen(10, "cpu")

    SweepInputsCpuTest.populate()

    class CpuTests(TestCase):
        common = check_model
        device = "cpu"

    # Copy every CommonTemplate test onto CpuTests with a "_cpu" suffix.
    copy_tests(CommonTemplate, CpuTests, "cpu")
# Instantiate the CUDA variants of the shared templates (ASAN builds excluded).
if HAS_CUDA and not TEST_WITH_ASAN:

    class SweepInputsCudaTest(SweepInputs2, TestCase):
        gen = InputGen(10, "cuda")

    SweepInputsCudaTest.populate()

    class CudaTests(TestCase):
        common = check_model_cuda
        device = "cuda"

    # Copy every CommonTemplate test onto CudaTests with a "_cuda" suffix.
    copy_tests(CommonTemplate, CudaTests, "cuda")
class TritonCodeGenTests(TestCase):
    """Tests that inspect the generated Triton source / kernels directly
    (rather than only comparing outputs against eager)."""

    from torch._inductor.triton_heuristics import CachingAutotuner

    class NoOpCompilerBackend:
        def __init__(self):
            self.example_args = None
            self.model = None

        def noop_backend(
            self,
            model_: torch.fx.GraphModule,
            example_inputs_: typing.List[torch.Tensor],
        ):
            """
            The Noop backend does not compile the fx graph it is given.
            Instead, it transforms the fx graph so that its functions are
            aten operations. It then saves this graph.
            """
            from torch._functorch.aot_autograd import Interpreter
            from torch._inductor.decomposition import select_decomp_table
            from torch._subclasses import FakeTensorMode

            fake_mode = FakeTensorMode()

            def interpret(*args, **kwargs):
                return Interpreter(model_).run(*args[0:], **kwargs)

            # Trace with fake tensors so no real computation happens.
            fake_flat_tensor_args = [
                fake_mode.from_tensor(x) for x in example_inputs_
            ]
            fw_module = make_fx(interpret, select_decomp_table())(
                *fake_flat_tensor_args
            )
            self.model = fw_module
            self.example_args = fake_flat_tensor_args
            return lambda x: example_inputs_

    def get_kernels(self, fn, args) -> typing.List[CachingAutotuner]:
        # Lower `fn` through inductor and collect every CachingAutotuner
        # (i.e. every generated Triton kernel) from the compiled module.
        from torch._inductor.debug import DebugContext
        from torch._inductor.graph import GraphLowering
        from torch._inductor.virtualized import V

        cxt = TritonCodeGenTests.NoOpCompilerBackend()
        torch._dynamo.optimize(backend=cxt.noop_backend)(fn)(*args)
        graph = GraphLowering(cxt.model)
        graph.num_static_inputs = 0
        kernels = []
        with V.set_graph_handler(graph), V.set_debug_handler(DebugContext()):
            graph.run(*(cxt.example_args))
            mod = graph.compile_to_module()
            for val in mod.__dict__.values():
                if isinstance(
                    val, torch._inductor.triton_heuristics.CachingAutotuner
                ):
                    kernels.append(val)
        return kernels

    def test_divisibile_by_16_covers_numel_args(self):
        torch._dynamo.reset()

        def fn(a: torch.Tensor) -> torch.Tensor:
            return torch.sum(a)

        kernels = self.get_kernels(fn, [torch.randn([256, 256], device="cuda")])
        self.assertTrue(len(kernels) == 2, "SUM should result in two kernels")

        # kernel0 reduces from 256 to (xnumel=8, rnumel=8192), which means it reduces 256 by 256 into an array of
        # size 8 by accumulating 8192 elements at once note that rnumel is equal to 512 * 16, so rnumel which is
        # at slot 3 should be in the divisible by 16 descriptor
        arguments_that_are_divisible_by_16_in_kernel0 = (
            kernels[0].meta["configs"][0].divisible_by_16
        )
        self.assertEqual(arguments_that_are_divisible_by_16_in_kernel0, (0, 1, 3))

        # kernel1 reduces from 8 elements to a single scalar.
        arguments_that_are_divisible_by_16_in_kernel1 = (
            kernels[1].meta["configs"][0].divisible_by_16
        )
        self.assertEqual(arguments_that_are_divisible_by_16_in_kernel1, (0, 1))
        torch._dynamo.reset()

    def test_optimize_indexing_dtype(self):
        # Index math that fits in 32 bits should be emitted as tl.int32.
        def fn(x: torch.Tensor) -> torch.Tensor:
            return aten.upsample_bilinear2d.vec(x, None, True, [2.0, 2.0])

        fn_opt = torch._dynamo.optimize("inductor")(fn)
        inps = [torch.randn(2, 4, 16, 16, device="cuda")]
        code = run_and_get_triton_code(fn_opt, *inps)
        self.assertTrue("to(tl.int32)" in code)
        self.assertFalse("to(tl.int64)" in code)

        self.assertEqual(fn_opt(*inps), fn(*inps))

    # See https://github.com/pytorch/pytorch/issues/100348
    def test_inductor_detach_view(self):
        def fn(x: torch.Tensor) -> torch.Tensor:
            a = x * 2
            return a, a.detach()

        fn_opt = torch._dynamo.optimize("inductor")(fn)
        inp = torch.ones(2, 2, requires_grad=True, device="cuda")
        inp_ref = inp.clone().detach().requires_grad_(True)
        out_ref = fn(inp_ref)
        out = fn_opt(inp)
        out_ref[0].sum().backward()
        out[0].sum().backward()
        self.assertEqual(inp.grad, inp_ref.grad)

    def test_not_materialize_pointwise_reduction(self):
        # The intermediate (a - b) should be fused into the reductions,
        # leaving a single store into the final output buffer.
        def fn(a, b):
            return (a - b).sum(dim=-1).amax(dim=-1)

        N = 16
        K = 7
        fn_opt = torch._dynamo.optimize("inductor")(fn)
        inps = [
            torch.randn(N, 1, K, device="cuda"),
            torch.randn(1, N, K, device="cuda"),
        ]
        code = run_and_get_triton_code(fn_opt, *inps)
        self.assertEqual(code.count("tl.store"), 1)
        self.assertTrue("out_ptr1" in code)
        self.assertFalse("out_ptr0" in code)
        self.assertEqual(fn_opt(*inps), fn(*inps))

    # Disable constant propagation, so we isolate value range analysis
    @patch.object(config, "constant_and_index_propagation", False)
    def test_cant_optimize_compute(self):
        # Values that may exceed int32 range must keep 64-bit compute.
        def ones():
            return torch.ones([4], device="cuda")

        def suffix(inp):
            return (inp.to(torch.int64) + 1).to(torch.float64)

        ten = torch.rand([4], device="cuda")

        for foo in (
            lambda x: x + 2147483657,
            lambda x: torch.where(x < 0, ones(), ones() - 2) * (-(2 ** (40))),
            lambda x: x + ten,
            lambda x: x + ten.sum(),
        ):

            def fn():
                return suffix(foo(ones()))

            fn_opt = torch._dynamo.optimize("inductor")(fn)
            code = run_and_get_triton_code(fn_opt)

            # this cannot be optimized away, value too large
            self.assertTrue("to(tl.int64)" in code)
            self.assertEqual(fn_opt(), fn())

    # Disable constant propagation, so we isolate value range analysis
    @patch.object(config, "constant_and_index_propagation", False)
    def test_optimize_compute(self):
        # Values provably within int32 range should be narrowed to tl.int32.
        def ones():
            return torch.ones([4], device="cuda")

        def suffix(inp):
            return (inp.to(torch.int64) + 1).to(torch.float64)

        for foo in (
            lambda x: x + 500,
            lambda x: torch.where(x < 0, ones(), ones() - 2) * (-(2 ** (20))),
            lambda x: x / 30,
        ):

            def fn():
                return suffix(foo(ones()))

            fn_opt = torch._dynamo.optimize("inductor")(fn)
            code = run_and_get_triton_code(fn_opt)

            # this can be optimized away, value too large
            self.assertTrue("to(tl.int64)" not in code)
            self.assertTrue("to(tl.int32)" in code)

            self.assertEqual(fn_opt(), fn())

    # Disable index propagation, so the indirect indexing isn't optimized away
    @patch.object(config, "constant_and_index_propagation", False)
    def test_computed_indirect_mask(self):
        def fn(x, n):
            tmp = torch.arange(n, device=x.device)
            return x[tmp] + 1

        x = torch.randn(8, device="cuda")
        fn_opt = torch.compile(fn)
        code = run_and_get_triton_code(fn_opt, x, 8)
        # load should be masked
        self.assertTrue("tl.load(in_ptr0 + (tmp0), xmask)" in code)
        self.assertEqual(fn(x, 8), fn_opt(x, 8))

    def test_kernel_names_descriptive(self):
        # Generated kernel names should encode the fused ops (aten mode) or
        # the originating module types (torch mode).
        @torch._dynamo.optimize("inductor")
        def fn1(x):
            return x.cos().sin()

        @torch._dynamo.optimize("inductor")
        def fn2(x):
            x = torch.mm(x, x)
            x = torch.softmax(x, dim=1)
            return x

        mod = nn.Sequential(
            nn.Linear(4, 4),
            nn.LayerNorm(4),
            nn.ReLU(),
        ).cuda()

        @torch._dynamo.optimize("inductor")
        def fn3(x):
            return mod(x)

        func_and_kernel_aten = [
            (fn1, "triton_poi_fused_cos_sin", (torch.randn(8, device="cuda"),)),
            (fn2, "triton_poi_fused__softmax", (torch.randn(4, 4, device="cuda"),)),
            (
                fn3,
                "triton_poi_fused_native_layer_norm_relu",
                (torch.randn(4, 4, device="cuda"),),
            ),
        ]
        func_and_kernel_torch = [
            (fn1, "triton_poi_fused_cos_sin", (torch.randn(8, device="cuda"),)),
            (fn2, "triton_poi_fused_softmax", (torch.randn(4, 4, device="cuda"),)),
            (
                fn3,
                "triton_poi_fused_LayerNorm_ReLU",
                (torch.randn(4, 4, device="cuda"),),
            ),
        ]

        def test_funcs(func_and_kernel):
            with torch.no_grad():
                for fn, kernel_name, inps in func_and_kernel:
                    code = run_and_get_triton_code(fn, *inps)
                    if kernel_name not in code:
                        print(code)
                    self.assertTrue(kernel_name in code)

        test_funcs(func_and_kernel_aten)
        patch.object(config.triton, "descriptive_names", "torch")(test_funcs)(
            func_and_kernel_torch
        )

    @patch.object(config, "profile_bandwidth", True)
    def test_bandwidth_profiler(self):
        # With profile_bandwidth on, the wrapper must emit start/end markers.
        @torch._dynamo.optimize("inductor")
        def fn(x):
            x = x.cos()
            x = x.cos()
            x = torch.mm(x, x)
            x = x.sin()
            x = x.relu()
            return x

        inp = torch.randn(4, 4, device="cuda")
        code = run_and_get_triton_code(fn, inp)
        fn(inp)
        self.assertTrue("start_graph" in code)
        self.assertTrue("end_graph" in code)

    def test_split_op_with_sym(self):
        def fn(x: torch.Tensor) -> torch.Tensor:
            # split(tensor, sympy.Integer), split(tensor, sympy.Expr)
            return torch.split(x, x.shape[0]), torch.split(x, x.shape[0] // 2)

        for dynamic_shapes in [True, False]:
            with torch._dynamo.config.patch(dynamic_shapes=dynamic_shapes):
                torch._dynamo.reset()
                fn_opt = torch._dynamo.optimize("inductor", dynamic=dynamic_shapes)(
                    fn
                )
                inps = torch.randn([5, 5])
                fn_opt(inps)

    @skipIfRocm
    def test_indirect_device_assert(self):
        # Run out-of-bounds indexing scenarios in a subprocess (device asserts
        # poison the CUDA context) and check the expected error is reported.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        test_path = os.path.join(dir_path, "indirect_assert_helper.py")
        fns = ("first_arg", "store", "second_arg", "same_pm_one", "same_pp_one")

        for fn, ndims, dyn_shape in itertools.product(fns, (2, 3), (True, False)):
            proc = subprocess.Popen(
                [
                    sys.executable,
                    test_path,
                    fn,
                    str(ndims),
                    str(dyn_shape),
                    "False",
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            stderr = proc.communicate()[1]
            self.assertTrue(
                any(
                    "index out of bounds" in err.decode("utf-8")
                    for err in stderr.splitlines()
                ),
                f"{fn}, {ndims}, {dyn_shape}, False",
            )

        proc = subprocess.Popen(
            [sys.executable, test_path, "first_arg", "2", "False", "True"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stderr = proc.communicate()[1]

        self.assertTrue(
            any(
                "index out of bounds" in err.decode("utf-8")
                for err in stderr.splitlines()
            ),
            "first_arg 2 False True",
        )
class RNNTest(TestCase):
    """Smoke test: compiling a module containing a GRU must not crash."""

    class Model(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.gru = torch.nn.GRU(16, 16, batch_first=True)

        def forward(self, x):
            return self.gru(x)

    def test_rnn_compile_safe(self):
        # No output comparison — only that compile + forward completes.
        device = torch.device("cuda")
        model = RNNTest.Model().to(device)
        model = torch._dynamo.optimize("inductor")(model)
        x = torch.rand(1024, 20, 16).to(device)
        model(x)
if HAS_CPU:

    class TestFull(TestCase):
        """torch.full across every (python fill type, target dtype) combo."""

        def test_full_dtype(self):
            pytypes = (
                bool,
                int,
                float,
                # TODO: Triton's JITFunction._type_of has no support for complex
                # complex,
            )

            dtypes = (
                torch.bool,
                torch.int32,
                torch.int64,
                torch.float32,
                torch.float64,
                None,
                # torch.complex64,
                # torch.complex128,
            )

            def fn(pytype, dtype):
                if pytype is bool:
                    fill_value = True
                elif pytype is int:
                    fill_value = 42
                elif pytype is float:
                    fill_value = 42.0
                else:
                    raise AssertionError(f"Unexpected Python type: {pytype}")

                return torch.full(
                    (4, 6), fill_value, dtype=dtype, device=torch.device("cpu")
                )

            fn_opt = torch._dynamo.optimize("inductor")(fn)

            for pytype, dtype in itertools.product(pytypes, dtypes):
                with enable_python_dispatcher():
                    with torch.no_grad():
                        ret_opt = fn_opt(pytype, dtype)

                self.assertEqual(ret_opt, fn(pytype, dtype))
if __name__ == "__main__":
    from torch._dynamo.test_case import run_tests

    # Only run when at least one backend is available on this machine.
    if HAS_CPU or HAS_CUDA:
        run_tests(needs="filelock")
| [
"[email protected]"
] | |
11674a3f0d3e56f5156c92dbc2833e200511d2f2 | a38180435ac5786185c0aa48891c0aed0ab9d72b | /S4/S4 Library/simulation/situations/complex/single_job_situation.py | 567ee8821eebf3d57e66f878dfe22edeb8aac7d7 | [
"CC-BY-4.0"
] | permissive | NeonOcean/Environment | e190b6b09dd5dbecba0a38c497c01f84c6f9dc7d | ca658cf66e8fd6866c22a4a0136d415705b36d26 | refs/heads/master | 2022-12-03T13:17:00.100440 | 2021-01-09T23:26:55 | 2021-01-09T23:26:55 | 178,096,522 | 1 | 1 | CC-BY-4.0 | 2022-11-22T20:24:59 | 2019-03-28T00:38:17 | Python | UTF-8 | Python | false | false | 1,366 | py | from role.role_state import RoleState
from sims4.tuning.tunable import TunableTuple
from situations.situation import Situation
from situations.situation_complex import SituationComplexCommon, SituationState, SituationStateData
from situations.situation_job import SituationJob
class SingleJobSituation(SituationComplexCommon):
    """A situation with exactly one tuned job/role-state pair; every Sim placed
    in it gets that job and immediately enters the single state."""

    INSTANCE_TUNABLES = {'job': TunableTuple(description='\n        The job and role which the career Sim is placed into.\n        ', situation_job=SituationJob.TunableReference(description='\n            A reference to a SituationJob that can be performed at this Situation.\n            '), role_state=RoleState.TunableReference(description='\n            A role state the Sim assigned to the job will perform.\n            '))}
    REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES

    @classmethod
    def _states(cls):
        # Single state machine: state id 1 -> SingleJobSituationState.
        return (SituationStateData(1, SingleJobSituationState),)

    @classmethod
    def _get_tuned_job_and_default_role_state_tuples(cls):
        return [(cls.job.situation_job, cls.job.role_state)]

    @classmethod
    def default_job(cls):
        return cls.job.situation_job

    def start_situation(self):
        super().start_situation()
        self._change_state(SingleJobSituationState())
class SingleJobSituationState(SituationState):
pass
| [
"[email protected]"
] | |
c88bbd34f0f67cb174f84f0b4cff4aa4f6cd855c | 3969f8402eaa015eb850e041e3dede4978ab9a4c | /pkg/eventlet-0.12.1/tests/patcher_psycopg_test.py | 80988e51fdde3eb2a4aa7f68b40ee7bb7f24f738 | [
"MIT"
] | permissive | seewindcn/pycocos2d | e333bf8ae29d8244e6540ed3d39d76d4002e2908 | b88c8c5df127f9bf82f62c8b4365f4babcdee105 | refs/heads/master | 2023-03-07T10:07:47.167364 | 2013-06-03T10:45:19 | 2013-06-03T10:45:19 | 9,958,133 | 18 | 7 | null | 2013-05-14T03:57:47 | 2013-05-09T11:43:46 | C | UTF-8 | Python | false | false | 1,811 | py | import os
from tests import patcher_test, skip_unless
from tests import get_database_auth
from tests.db_pool_test import postgres_requirement
psycopg_test_file = """
import os
import sys
import eventlet
eventlet.monkey_patch()
from eventlet import patcher
if not patcher.is_monkey_patched('psycopg'):
print "Psycopg not monkeypatched"
sys.exit(0)
count = [0]
def tick(totalseconds, persecond):
for i in xrange(totalseconds*persecond):
count[0] += 1
eventlet.sleep(1.0/persecond)
dsn = os.environ['PSYCOPG_TEST_DSN']
import psycopg2
def fetch(num, secs):
conn = psycopg2.connect(dsn)
cur = conn.cursor()
for i in range(num):
cur.execute("select pg_sleep(%s)", (secs,))
f = eventlet.spawn(fetch, 2, 1)
t = eventlet.spawn(tick, 2, 100)
f.wait()
assert count[0] > 100, count[0]
print "done"
"""
class PatchingPsycopg(patcher_test.ProcessBase):
@skip_unless(postgres_requirement)
def test_psycopg_patched(self):
if 'PSYCOPG_TEST_DSN' not in os.environ:
# construct a non-json dsn for the subprocess
psycopg_auth = get_database_auth()['psycopg2']
if isinstance(psycopg_auth,str):
dsn = psycopg_auth
else:
dsn = " ".join(["%s=%s" % (k,v) for k,v, in psycopg_auth.iteritems()])
os.environ['PSYCOPG_TEST_DSN'] = dsn
self.write_to_tempfile("psycopg_patcher", psycopg_test_file)
output, lines = self.launch_subprocess('psycopg_patcher.py')
if lines[0].startswith('Psycopg not monkeypatched'):
print "Can't test psycopg2 patching; it's not installed."
return
# if there's anything wrong with the test program it'll have a stack trace
self.assert_(lines[0].startswith('done'), output)
| [
"none@none"
] | none@none |
35553d9c5dd5cafe84cfa94fd0c31fdeb3e4b8b7 | 4fc1037af17efa358be6cd886fcfd67c5272e93e | /httpx/_main.py | 7bd6b90846ee1d61fc636a7c077b09bbec5b947f | [
"BSD-3-Clause"
] | permissive | hugovk/httpx | d7e9f6bd463c22f454f31f4065babb53427bd73f | 43a1c1c8269cf56a016891aead091acbc3408e81 | refs/heads/master | 2023-01-09T08:55:42.929163 | 2022-03-08T10:53:15 | 2022-03-08T10:53:15 | 203,822,426 | 0 | 0 | BSD-3-Clause | 2023-09-11T10:32:59 | 2019-08-22T15:26:18 | Python | UTF-8 | Python | false | false | 15,506 | py | import functools
import json
import sys
import typing
import click
import httpcore
import pygments.lexers
import pygments.util
import rich.console
import rich.markup
import rich.progress
import rich.syntax
import rich.table
from ._client import Client
from ._exceptions import RequestError
from ._models import Response
from ._status_codes import codes
def print_help() -> None:
console = rich.console.Console()
console.print("[bold]HTTPX :butterfly:", justify="center")
console.print()
console.print("A next generation HTTP client.", justify="center")
console.print()
console.print(
"Usage: [bold]httpx[/bold] [cyan]<URL> [OPTIONS][/cyan] ", justify="left"
)
console.print()
table = rich.table.Table.grid(padding=1, pad_edge=True)
table.add_column("Parameter", no_wrap=True, justify="left", style="bold")
table.add_column("Description")
table.add_row(
"-m, --method [cyan]METHOD",
"Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD.\n"
"[Default: GET, or POST if a request body is included]",
)
table.add_row(
"-p, --params [cyan]<NAME VALUE> ...",
"Query parameters to include in the request URL.",
)
table.add_row(
"-c, --content [cyan]TEXT", "Byte content to include in the request body."
)
table.add_row(
"-d, --data [cyan]<NAME VALUE> ...", "Form data to include in the request body."
)
table.add_row(
"-f, --files [cyan]<NAME FILENAME> ...",
"Form files to include in the request body.",
)
table.add_row("-j, --json [cyan]TEXT", "JSON data to include in the request body.")
table.add_row(
"-h, --headers [cyan]<NAME VALUE> ...",
"Include additional HTTP headers in the request.",
)
table.add_row(
"--cookies [cyan]<NAME VALUE> ...", "Cookies to include in the request."
)
table.add_row(
"--auth [cyan]<USER PASS>",
"Username and password to include in the request. Specify '-' for the password to use "
"a password prompt. Note that using --verbose/-v will expose the Authorization "
"header, including the password encoding in a trivially reversible format.",
)
table.add_row(
"--proxy [cyan]URL",
"Send the request via a proxy. Should be the URL giving the proxy address.",
)
table.add_row(
"--timeout [cyan]FLOAT",
"Timeout value to use for network operations, such as establishing the connection, "
"reading some data, etc... [Default: 5.0]",
)
table.add_row("--follow-redirects", "Automatically follow redirects.")
table.add_row("--no-verify", "Disable SSL verification.")
table.add_row(
"--http2", "Send the request using HTTP/2, if the remote server supports it."
)
table.add_row(
"--download [cyan]FILE",
"Save the response content as a file, rather than displaying it.",
)
table.add_row("-v, --verbose", "Verbose output. Show request as well as response.")
table.add_row("--help", "Show this message and exit.")
console.print(table)
def get_lexer_for_response(response: Response) -> str:
content_type = response.headers.get("Content-Type")
if content_type is not None:
mime_type, _, _ = content_type.partition(";")
try:
return pygments.lexers.get_lexer_for_mimetype(mime_type.strip()).name
except pygments.util.ClassNotFound: # pragma: nocover
pass
return "" # pragma: nocover
def format_request_headers(request: httpcore.Request, http2: bool = False) -> str:
version = "HTTP/2" if http2 else "HTTP/1.1"
headers = [
(name.lower() if http2 else name, value) for name, value in request.headers
]
method = request.method.decode("ascii")
target = request.url.target.decode("ascii")
lines = [f"{method} {target} {version}"] + [
f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers
]
return "\n".join(lines)
def format_response_headers(
http_version: bytes,
status: int,
reason_phrase: typing.Optional[bytes],
headers: typing.List[typing.Tuple[bytes, bytes]],
) -> str:
version = http_version.decode("ascii")
reason = (
codes.get_reason_phrase(status)
if reason_phrase is None
else reason_phrase.decode("ascii")
)
lines = [f"{version} {status} {reason}"] + [
f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers
]
return "\n".join(lines)
def print_request_headers(request: httpcore.Request, http2: bool = False) -> None:
console = rich.console.Console()
http_text = format_request_headers(request, http2=http2)
syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True)
console.print(syntax)
syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True)
console.print(syntax)
def print_response_headers(
http_version: bytes,
status: int,
reason_phrase: typing.Optional[bytes],
headers: typing.List[typing.Tuple[bytes, bytes]],
) -> None:
console = rich.console.Console()
http_text = format_response_headers(http_version, status, reason_phrase, headers)
syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True)
console.print(syntax)
syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True)
console.print(syntax)
def print_response(response: Response) -> None:
console = rich.console.Console()
lexer_name = get_lexer_for_response(response)
if lexer_name:
if lexer_name.lower() == "json":
try:
data = response.json()
text = json.dumps(data, indent=4)
except ValueError: # pragma: nocover
text = response.text
else:
text = response.text
syntax = rich.syntax.Syntax(text, lexer_name, theme="ansi_dark", word_wrap=True)
console.print(syntax)
else:
console.print(f"<{len(response.content)} bytes of binary data>")
def format_certificate(cert: dict) -> str: # pragma: nocover
lines = []
for key, value in cert.items():
if isinstance(value, (list, tuple)):
lines.append(f"* {key}:")
for item in value:
if key in ("subject", "issuer"):
for sub_item in item:
lines.append(f"* {sub_item[0]}: {sub_item[1]!r}")
elif isinstance(item, tuple) and len(item) == 2:
lines.append(f"* {item[0]}: {item[1]!r}")
else:
lines.append(f"* {item!r}")
else:
lines.append(f"* {key}: {value!r}")
return "\n".join(lines)
def trace(name: str, info: dict, verbose: bool = False) -> None:
console = rich.console.Console()
if name == "connection.connect_tcp.started" and verbose:
host = info["host"]
console.print(f"* Connecting to {host!r}")
elif name == "connection.connect_tcp.complete" and verbose:
stream = info["return_value"]
server_addr = stream.get_extra_info("server_addr")
console.print(f"* Connected to {server_addr[0]!r} on port {server_addr[1]}")
elif name == "connection.start_tls.complete" and verbose: # pragma: nocover
stream = info["return_value"]
ssl_object = stream.get_extra_info("ssl_object")
version = ssl_object.version()
cipher = ssl_object.cipher()
server_cert = ssl_object.getpeercert()
alpn = ssl_object.selected_alpn_protocol()
console.print(f"* SSL established using {version!r} / {cipher[0]!r}")
console.print(f"* Selected ALPN protocol: {alpn!r}")
if server_cert:
console.print("* Server certificate:")
console.print(format_certificate(server_cert))
elif name == "http11.send_request_headers.started" and verbose:
request = info["request"]
print_request_headers(request, http2=False)
elif name == "http2.send_request_headers.started" and verbose: # pragma: nocover
request = info["request"]
print_request_headers(request, http2=True)
elif name == "http11.receive_response_headers.complete":
http_version, status, reason_phrase, headers = info["return_value"]
print_response_headers(http_version, status, reason_phrase, headers)
elif name == "http2.receive_response_headers.complete": # pragma: nocover
status, headers = info["return_value"]
http_version = b"HTTP/2"
reason_phrase = None
print_response_headers(http_version, status, reason_phrase, headers)
def download_response(response: Response, download: typing.BinaryIO) -> None:
console = rich.console.Console()
console.print()
content_length = response.headers.get("Content-Length")
with rich.progress.Progress(
"[progress.description]{task.description}",
"[progress.percentage]{task.percentage:>3.0f}%",
rich.progress.BarColumn(bar_width=None),
rich.progress.DownloadColumn(),
rich.progress.TransferSpeedColumn(),
) as progress:
description = f"Downloading [bold]{rich.markup.escape(download.name)}"
download_task = progress.add_task(
description,
total=int(content_length or 0),
start=content_length is not None,
)
for chunk in response.iter_bytes():
download.write(chunk)
progress.update(download_task, completed=response.num_bytes_downloaded)
def validate_json(
ctx: click.Context,
param: typing.Union[click.Option, click.Parameter],
value: typing.Any,
) -> typing.Any:
if value is None:
return None
try:
return json.loads(value)
except json.JSONDecodeError: # pragma: nocover
raise click.BadParameter("Not valid JSON")
def validate_auth(
ctx: click.Context,
param: typing.Union[click.Option, click.Parameter],
value: typing.Any,
) -> typing.Any:
if value == (None, None):
return None
username, password = value
if password == "-": # pragma: nocover
password = click.prompt("Password", hide_input=True)
return (username, password)
def handle_help(
ctx: click.Context,
param: typing.Union[click.Option, click.Parameter],
value: typing.Any,
) -> None:
if not value or ctx.resilient_parsing:
return
print_help()
ctx.exit()
@click.command(add_help_option=False)
@click.argument("url", type=str)
@click.option(
"--method",
"-m",
"method",
type=str,
help=(
"Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD. "
"[Default: GET, or POST if a request body is included]"
),
)
@click.option(
"--params",
"-p",
"params",
type=(str, str),
multiple=True,
help="Query parameters to include in the request URL.",
)
@click.option(
"--content",
"-c",
"content",
type=str,
help="Byte content to include in the request body.",
)
@click.option(
"--data",
"-d",
"data",
type=(str, str),
multiple=True,
help="Form data to include in the request body.",
)
@click.option(
"--files",
"-f",
"files",
type=(str, click.File(mode="rb")),
multiple=True,
help="Form files to include in the request body.",
)
@click.option(
"--json",
"-j",
"json",
type=str,
callback=validate_json,
help="JSON data to include in the request body.",
)
@click.option(
"--headers",
"-h",
"headers",
type=(str, str),
multiple=True,
help="Include additional HTTP headers in the request.",
)
@click.option(
"--cookies",
"cookies",
type=(str, str),
multiple=True,
help="Cookies to include in the request.",
)
@click.option(
"--auth",
"auth",
type=(str, str),
default=(None, None),
callback=validate_auth,
help=(
"Username and password to include in the request. "
"Specify '-' for the password to use a password prompt. "
"Note that using --verbose/-v will expose the Authorization header, "
"including the password encoding in a trivially reversible format."
),
)
@click.option(
"--proxies",
"proxies",
type=str,
default=None,
help="Send the request via a proxy. Should be the URL giving the proxy address.",
)
@click.option(
"--timeout",
"timeout",
type=float,
default=5.0,
help=(
"Timeout value to use for network operations, such as establishing the "
"connection, reading some data, etc... [Default: 5.0]"
),
)
@click.option(
"--follow-redirects",
"follow_redirects",
is_flag=True,
default=False,
help="Automatically follow redirects.",
)
@click.option(
"--no-verify",
"verify",
is_flag=True,
default=True,
help="Disable SSL verification.",
)
@click.option(
"--http2",
"http2",
type=bool,
is_flag=True,
default=False,
help="Send the request using HTTP/2, if the remote server supports it.",
)
@click.option(
"--download",
type=click.File("wb"),
help="Save the response content as a file, rather than displaying it.",
)
@click.option(
"--verbose",
"-v",
type=bool,
is_flag=True,
default=False,
help="Verbose. Show request as well as response.",
)
@click.option(
"--help",
is_flag=True,
is_eager=True,
expose_value=False,
callback=handle_help,
help="Show this message and exit.",
)
def main(
url: str,
method: str,
params: typing.List[typing.Tuple[str, str]],
content: str,
data: typing.List[typing.Tuple[str, str]],
files: typing.List[typing.Tuple[str, click.File]],
json: str,
headers: typing.List[typing.Tuple[str, str]],
cookies: typing.List[typing.Tuple[str, str]],
auth: typing.Optional[typing.Tuple[str, str]],
proxies: str,
timeout: float,
follow_redirects: bool,
verify: bool,
http2: bool,
download: typing.Optional[typing.BinaryIO],
verbose: bool,
) -> None:
"""
An HTTP command line client.
Sends a request and displays the response.
"""
if not method:
method = "POST" if content or data or files or json else "GET"
try:
with Client(
proxies=proxies,
timeout=timeout,
verify=verify,
http2=http2,
) as client:
with client.stream(
method,
url,
params=list(params),
content=content,
data=dict(data),
files=files, # type: ignore
json=json,
headers=headers,
cookies=dict(cookies),
auth=auth,
follow_redirects=follow_redirects,
extensions={"trace": functools.partial(trace, verbose=verbose)},
) as response:
if download is not None:
download_response(response, download)
else:
response.read()
if response.content:
print_response(response)
except RequestError as exc:
console = rich.console.Console()
console.print(f"[red]{type(exc).__name__}[/red]: {exc}")
sys.exit(1)
sys.exit(0 if response.is_success else 1)
| [
"[email protected]"
] | |
d58ae3c7d5f559290e4ad6aba0e009878635ebe6 | 625daac7e73b98935f9fe93e647eb809b48b712e | /Arcade/Intro/adjacentElementsProduct.py | 07c848e4a7c18445ca0d1d6cf05d6044c620be21 | [] | no_license | aleksaa01/codefights-codesignal | 19b2d70779cc60f62511b6f88ae5d049451eac82 | a57a5589ab2c9d9580ef44900ea986c826b23051 | refs/heads/master | 2022-03-15T04:46:40.356440 | 2019-12-08T15:41:37 | 2019-12-08T15:41:37 | 112,034,380 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | def adjacentElementsProduct(arr):
max_pair = arr[0]*arr[1]
for i in range(1, len(arr)-1):
if arr[i]*arr[i+1] > max_pair:
max_pair = arr[i]*arr[i+1]
return max_pair
"""
Given an array of integers, find the pair of adjacent elements that has the
largest product and return that product.
Example
For inputArray = [3, 6, -2, -5, 7, 3], the output should be
adjacentElementsProduct(inputArray) = 21.
7 and 3 produce the largest product.
"""
| [
"[email protected]"
] | |
c6492508982755a4e1e8b20b63f7fa75931cdd05 | fbd4ecf7046171c4e96267c5982c964db54578f5 | /business/p201904/110111_2300/server.py | 0872fb79fab8ceec11b2a306e6bc2a815aee5719 | [] | no_license | Alvin2580du/alvin_py | 6dddcfbfae214694e9f3dafd976101e681f2a66d | 82d3e9808073f2145b039ccf464c526cb85274e3 | refs/heads/master | 2021-05-05T16:01:43.544783 | 2019-10-29T02:23:59 | 2019-10-29T02:23:59 | 117,328,713 | 12 | 2 | null | 2021-03-20T00:06:37 | 2018-01-13T08:51:49 | Python | UTF-8 | Python | false | false | 3,200 | py | import os.path
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
import sys
from gensim.models.word2vec import Word2Vec
import numpy as np
import jieba
from sklearn.externals import joblib
ports = sys.argv[1]
define("port", default=ports, help="run on the given port", type=int)
# 加载模型
imdb_w2v = Word2Vec.load('w2v_model.pkl')
clf = joblib.load('svm_model.pkl')
# 对每个句子的所有词向量取均值,来生成一个句子的vector
def build_sentence_vector(text, size, imdb_w2v):
vec = np.zeros(size).reshape((1, size))
count = 0.
for word in text:
try:
vec += imdb_w2v[word].reshape((1, size))
count += 1.
except KeyError:
continue
if count != 0:
vec /= count
return vec
# 构建待预测句子的向量
def get_predict_vecs(words, n_dim=300):
train_vecs = build_sentence_vector(words, n_dim, imdb_w2v)
return train_vecs
# 对单个句子进行情感判断
def svm_predict(string):
words = jieba.lcut(string)
words_vecs = get_predict_vecs(words)
result = clf.predict(words_vecs)
if int(result[0]) == 1:
return "positive"
else:
return "negative"
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render("index.html")
class UserHandler(tornado.web.RequestHandler):
def post(self):
message = self.get_argument("message")
print("输入的句子是:{}".format(message))
res = svm_predict(message)
self.render("message.html", message="{}的情感极性是:\n{}".format(message, res))
handlers = [
(r"/", IndexHandler),
(r"/user", UserHandler)
]
if __name__ == "__main__":
""" 测试句子
坐牢,怎么可能轻易放过
把携程亲子园所有的老师全部全家处死一个不留
妈呀,光看视频就已经哭的不行,这些人还有没有人性啊,希望法律严惩,给家长们一个交代。
认错已经不是原谅的理由,必须严惩,孩子的伤害是无法弥补的
中国改改法律吧,就是因为他们以前这种幼师犯罪判个一两年就了事,才有这么多人更甚,最少十年以上,严重判死刑,看有几个还敢的
真应该给这些人判死刑啊
真的是心疼到无法呼吸!!!!!啊啊啊啊啊啊妈的比
没有职业道德就不用当幼师,承受不了孩子的吵闹各种调皮就不要当幼师,真的别当幼师,你都没爱心了,何必去当幼师,可怜的孩子遇见你真的是很可怜
打死都不可惜
我也是位母亲,看到这样的视频,真的是很揪心
简直不配做人!简直无法理解!谁招来的这畜生也得负责任吧!不,畜生都比她强!
这种人希望被国家拉黑
"""
template_path = os.path.join(os.path.dirname(__file__), "template")
tornado.options.parse_command_line()
app = tornado.web.Application(handlers, template_path)
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| [
"[email protected]"
] | |
f43b801df2a2396b5627c17b19e71a5d8c8eeef8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_258/ch30_2019_03_10_21_05_56_563616.py | c59f95da9501a2311605f0176a60d8e35f2f4a9f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | import math
v=int(input('Qual a velocidade do lançamento? '))
a=int(input('Qual o ângulo do lançamento? '))
d=(v**2)*math.sin(2*math.degrees(a))/9.8
if d<96:
print('Muito perto')
elif d>104:
print('Muito longe')
else:
print('Acertou!')
| [
"[email protected]"
] | |
82de4cfcd9dd16a9de9e20740c96e2672531521a | 4cbc8b81d197bc392d1b57856254300331b9738f | /python/apt.py | e87a96e500abda1744a02b482a2c973b8f718a19 | [
"MIT"
] | permissive | vcatafesta/chili | 87b9606f17cda645ba44cbf2bb4cc4637e18d211 | 5c734ac88454db76eb2f4e92c13364a5bbc7a93a | refs/heads/main | 2023-09-01T01:39:09.457448 | 2023-08-29T21:23:28 | 2023-08-29T21:23:28 | 171,972,556 | 2 | 2 | null | 2019-02-22T01:38:49 | 2019-02-22T01:26:46 | null | UTF-8 | Python | false | false | 15,595 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# License: MIT #
#url = 'http://mazonos.com/packages/' # Official Repository
url = 'http://localhost/packages/' # Official Repository
#url = 'https://github.com/vcatafesta/ChiliOS/tree/master/' # Official Repository
#filecsv = '/var/lib/fetch/database.db' # Repository package list
filecsv = 'database.db' # Repository package list
dircsv = '/var/lib/fetch/' # Folder for the .csv file
dirlist = '/var/lib/banana/list/' # Folder for the .list file
PRG = '.chi.zst'
#PRG = '.mz'
# Flags
found = False
done = False
fi = ""
rof = ""
end = ""
endef = ""
next = ""
#define
PKG_FULLNAME = 0
PKG_ARCH = 1
PKG_BASE = 2
PKG_BASE_VERSION = 3
PKG_VERSION = 4
PKG_BUILD = 5
#Strings
choose = ''
textAnimate = ''
please_connect = 'No route to server! Check your internet connection!'
package_not_found = 'Package not found.'
# Arrays
switches = [
's', 'search',
'i', 'install',
'r', 'remove',
'w', 'show',
'u', 'update',
'g', 'upgrade',
'c', 'checkdesc'
]
modules = ['requests', 'requests_html', 'bs4', 'argparse', 'consolecolor', 'sty']
import sys
import os
import csv
import threading
import itertools
import time
import re
for module in modules:
try:
__import__(module)
except Exception as e:
print('Installing modules...')
os.system('pip3 install ' + str(module))
# os.system('clear')
end
import requests
import requests_html
import bs4
from urllib.request import urlopen
from bs4 import BeautifulSoup
from requests_html import HTMLSession
from consolecolor import FontColor
from consolecolor import Colors
from sty import fg, bg, ef, rs, RgbFg
fg.set_style('orange', RgbFg(255, 150, 50))
def version():
print(''' __ _ _
/ _| ___| |_ ___| |__ Copyright (C) 2019-2020 Vilmar Catafesta <[email protected]>
| |_ / _ \ __/ __| '_ \
| _| __/ || (__| | | | Este programa pode ser redistribuído livremente
|_| \___|\__\___|_| |_| sob os termos da Licença Pública Geral GNU.
fetch 1.00.00.20200817
''')
def check():
#Checks if the folder exists
if not os.path.isdir(dircsv):
os.mkdir(dircsv)
print('Created folder ' + dircsv + '.')
fi
# Checks if the file exists
if not os.path.isfile(filecsv):
os.system('touch ' + filecsv)
print('Created file ' + filecsv + '.')
os.system('clear')
update()
fi
def main():
try:
sys.argv[1]
except IndexError:
usage()
exit(1)
else:
global choose
choose = str(sys.argv[1])
def usage():
print('erro: nenhuma operação especificada (use -h para obter ajuda)')
def usage():
print('''uso: fetch <operação> [...]
operações:
fetch {-h --help}
fetch {-V --version}
fetch {-D --database} <opções> <pacote(s)>
fetch {-F --files} [opções] [pacote(s)]
fetch {-Q --query} [opções] [pacote(s)]
fetch {-R --remove} [opções] <pacote(s)>
fetch {-S --sync} [opções] [pacote(s)]
fetch {-T --deptest} [opções] [pacote(s)]
fetch {-U --upgrade} [opções] <arquivo(s)>
use "fetch {-h --help}" com uma operação para ver as opções disponíveis
''')
def internet_on():
try:
response = urlopen(url, timeout=10)
return True
except:
return False
def animate():
for c in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
fi
sys.stdout.write('\r' + textAnimate + c)
sys.stdout.flush()
time.sleep(0.1)
next
sys.stdout.write('\r' + textAnimate + 'complete! \n')
def s():
search()
def search():
global found
if (len(sys.argv) >= 3):
package = str(sys.argv[2])
else:
package = None
fi
with open(filecsv, 'r') as f:
csv_reader = csv.reader(f)
count = 0
linepackage()
for line in csv_reader:
if package:
if package in line[0]:
count += 1
print("{}({:04d}) {}{:<30}{} {:<20} {:<40}".format(fg.green, count, fg.orange, line[0].replace(PRG, ''), fg.rs, line[1], line[4]))
found = True
fi
else:
count += 1
print("{}({:04d}) {}{:<30}{} {:<20} {:<40}".format(fg.green, count, fg.orange, line[0].replace(PRG, ''), fg.rs, line[1], line[4]))
found = True
fi
next
print('(' + str(count) + ') package(s) found.')
if not found:
print(package_not_found)
fi
end
def linepackage():
print(fg.cyan + ' Package version fullname' + fg.rs)
return
def i():
install()
def install():
if internet_on():
try:
sys.argv[2]
except IndexError:
usage()
exit(0)
else:
global found
package = str(sys.argv[2])
links = []
packages = []
with open(filecsv, 'r') as f:
csv_reader = csv.reader(f)
count = 0
linepackage()
for line in csv_reader:
if package in line[0]:
found = True
count += 1
print("{}({:04d}) {}{:<30}{} {:<20} {:<40}".format(fg.green, count, fg.orange, line[0].replace(PRG, ''), fg.rs, line[1], line[4]))
links.append(url + line[4])
packages.append(line[3])
fi
next
if found:
pkgcount = packages.__len__()
pkglist = ''
for p in packages:
pkglist += (p + ', ')
next
pkglist = pkglist[:-2] + '.'
# print(str(pkgcount) + ' packages found: ' + pkglist.replace(PRG, ''))
print()
install = input(':: Continue installation? [Y/n] : ')
if install == 'Y' or install == 'y':
for p in range(pkgcount):
cstr = 'curl'
cstr += ' -#'
cstr += ' -k'
cstr += ' -o /tmp/' + packages[p]
cstr += ' -O ' + links[p]
os.system(cstr)
os.system('(banana -i ' + '/tmp/' + packages[p] + '2>&1>/dev/null)')
# os.system('rm ' + '/tmp/' + packages[p])
rof
else:
exit(0)
fi
else: # if not found
print(package_not_found)
fi
end
end
else:
print(please_connect)
exit(0)
fi
def r():
remove()
def remove():
try:
sys.argv[2]
except IndexError:
usage()
exit(0)
else:
global found
onlyfiles = [f for f in os.listdir(dirlist) if os.path.isfile(os.path.join(dirlist, f))]
r = re.compile(sys.argv[2] + '.*')
newlist = list(filter(r.match, onlyfiles))
if newlist:
found = True
for pack in newlist:
package = pack.replace('.list', '')
remove = input('You like remove ' + package + '? [Y/n] : ')
if remove == 'y' or remove == 'Y':
os.system('banana remove ' + package + ' -y')
else:
exit(0)
if not found:
print(package_not_found)
def w():
show()
def show():
try:
sys.argv[2]
except IndexError:
usage()
exit(0)
else:
global found
package = str(sys.argv[2])
with open(filecsv, 'r') as f:
csv_reader = csv.reader(f)
for line in csv_reader:
if package in line[0]:
found = True
pkgname = line[0]
version = line[1]
internet = internet_on()
lDesc = False
if line[4]:
if internet:
r = requests.get(url + line[4] + '.desc' )
if not r.status_code == 404:
lDesc = True
text = r.text
fi
fi
fi
if not lDesc:
maintainer = '(unknown)'
desc = 'Description not available for this package!'
if not internet:
desc += '\n' + please_connect
fi
else:
maintainer = (re.findall('maintainer.*', text)[0]).replace("'", '').replace('maintainer=', '').replace('"', '')
desc = (re.findall('desc.*', text)[0]).replace("'", '').replace('desc=', '').replace('"', '')
#desc = ((text.split('|')[2]).replace('#', '').replace('=', '').replace('desc"', ''))[:-2]
fi
print( fg.cyan + text + fg.rs )
# print('Package Name: ' + pkgname)
# print('Version.....: ' + version)
# print('Maintainer..: ' + maintainer)
# print('Desc........: ' + desc)
# print('#' * 70)
fi
next
if not found:
print(package_not_found)
fi
end
end
def u():
update()
def pause( xVar ):
os.system('clear')
print( xVar )
resp = 'S'
resp = input('Continuar [Y/n] : ')
if resp == 'Y' or resp == 'y' or resp == 'S' or resp == 's':
return
fi
exit(1)
def update():
if internet_on():
global textAnimate
global done
if os.path.isfile(filecsv):
os.system('rm ' + filecsv)
fi
result = requests.get(url)
src = result.content
soup = BeautifulSoup(src, 'html.parser')
links = soup.find_all('a')
ntotalpkg = 0
for link in links:
if '../' in link.text:
continue
fi
if '/' in link.text:
urls = url + link.get('href')
result = requests.get(urls)
src = result.content
soup = BeautifulSoup(src, 'html.parser')
folders = soup.find_all('a')
folder = link.text
ncontapkg = 0
for l in folders:
pkg = l.get('href')
string = ''
if l.text.endswith((PRG)):
ncontapkg += 1
ntotalpkg += 1
string = pkg
pkgsplit = splitpkg(pkg)
with open(filecsv, 'a') as f:
csv_writer = csv.writer(f)
csv_writer.writerow([pkgsplit[PKG_BASE], pkgsplit[PKG_VERSION], pkgsplit[PKG_BUILD], string, folder+string])
end
fi
rof
print("{}::{}Updating... {}({:04d}) {}packages in {}{}{}".format(fg.cyan, fg.rs, fg.cyan, ncontapkg, fg.red, fg.yellow, link.get('href'),fg.rs))
fi
rof
print('')
print("{}({:04d}) {}packages{} in repo".format(fg.cyan, ntotalpkg, fg.red, fg.rs))
done = True
else:
print(please_connect)
fi
def splitpkg(cfilename):
cfullname = cfilename
pkg_arch = cfullname.replace('-any' + PRG,'')
pkg_arch = pkg_arch.replace('-x86_64' + PRG,'')
# pkg_arch = pkg_arch.replace(PRG,'')
carch = cfullname.replace(PRG,'')
csplit = pkg_arch.rsplit('-',2)
cbase = csplit[0]
cbase_version = cfilename.rsplit('-',1)[0]
cbuild = csplit[2]
cversion = csplit[1]
cversion += '-'
cversion += cbuild
return( cfullname, carch, cbase, cbase_version, cversion, cbuild)
def g():
upgrade()
def upgrade():
print('Upgrading...')
def c():
checkdesc()
def checkdesc():
if internet_on():
update()
global textAnimate
global done
found = False
nodescs = []
textAnimate = 'Searching '
t = threading.Thread(target=animate)
t.start()
with open(filecsv, 'r') as f:
csv_reader = csv.reader(f)
for line in csv_reader:
if line[2] == '':
found = True
nodescs.append(' ' + line[0] + line[1] + '.desc -> not found!')
fi
end
done = True
end
if found:
print('The following packages do not have the .desc file:')
for n in nodescs:
print(n)
else:
print('All packages are OK!')
fi
else:
print(please_connect)
exit(1)
fi
switches = [
's', 'search',
'i', 'install',
'r', 'remove',
'w', 'show',
'u', 'update',
'g', 'upgrade',
'c', 'checkdesc'
]
def indirect(i):
switcher={
'-h':usage, '--help':usage,
'-Sy':update,
'-Sw':show,
'-Si':install,
'-Su':upgrade,
'-R':remove,
'-Q':search,
'-V':version,
'-f':lambda: 'force',
'-y':lambda: 'auto'
}
func=switcher.get(i, lambda: 'invalid')
return func()
try:
check()
main()
# if choose in switches:
# functions = locals()
# functions[choose]()
# else:
# usage()
# print('Invalid \"' + choose + '\" operation!')
indirect(sys.argv[1])
except KeyboardInterrupt:
print('\n')
exit(0)
import sys
import argparse
#def main():
# parser = argparse.ArgumentParser(description='ChiliOS fetch') # (1)
# parser.add_argument('-Sy', nargs='*', type=str, default='', required=False, help='Sync') #(2)
# parser.add_argument('-Su', nargs='*', type=str, default='', required=False, help='Sync') #(2)
# parser.add_argument('-f', dest='force', nargs='*', type=str, default='', required=False, help='Sync') #(2)
# parser.add_argument('-y', dest='auto', nargs='*', type=str, default='', required=False, help='Sync') #(2)
# args = parser.parse_args() #(3)
# choose = 'Sy'
# if choose in switches:
# functions = locals()
# pause( functions )
# functions['update']()
# print("Sy={}".format(args.Sy)) # (4)
# print("Su={}".format(args.Su)) # (4)
# print(" f={}".format(args.force)) # (4)
# print(" y={}".format(args.auto)) # (4)
# return 0
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
1f66c2f28360a924a6ad07d2b8c8af414203518d | 50f8d8975b1f17c4c6bcb9be29d4f0ed49cb42a5 | /Week_04/lemonade-change.py | 5e567df5df1217ba3921dd4a56cf9268dd95ae3f | [] | no_license | Jiangjao/-algorithm015 | 098491b7a9b80626c1d9e15a9125e4e460ee8668 | a6969617f4cde1d948cb064c1078d4d510140758 | refs/heads/master | 2023-01-10T17:42:49.495871 | 2020-11-16T07:35:04 | 2020-11-16T07:35:04 | 289,441,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | class Solution(object):
def lemonadeChange(self, bills):
"""
:type bills: List[int]
:rtype: bool
"""
five = ten = 0
for bill in bills:
if bill == 5:
five += 1
elif bill == 10:
if not five: return False
five -= 1
ten += 1
else:
if ten and five:
ten -= 1
five -= 1
elif five >= 3:
five -= 3
else:
return False
return True | [
"[email protected]"
] | |
5f249cf5e48d2382470baa0a978bc3a0abafafc6 | d2ca1ab6ed63983d1bd6497f26a63f0445451844 | /2015/05/fc_2015_05_31.py | dc41dc9178467c3c8859ffc19ce4fdb301b45b7d | [
"MIT"
] | permissive | mfwarren/FreeCoding | 96636367f4f4a53351535372c5691d7805199f23 | 58ac87f35ad2004a3514782556762ee0ed72c39a | refs/heads/master | 2021-01-19T14:30:09.057354 | 2015-07-05T05:59:53 | 2015-07-05T05:59:53 | 24,469,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | #!/usr/bin/env python3
# imports go here
#
# Free Coding session for 2015-05-31
# Written by Matt Warren
#
def factors(x):
    """Return the prime factorization of x as a list, with multiplicity.

    Factors are emitted in non-decreasing order, e.g. factors(12) == [2, 2, 3].
    factors(1) and factors(0) return [].

    BUGFIX: the original tested divisibility with float division
    (``int(v) == v``), which silently loses precision for large inputs;
    this version uses pure integer arithmetic and also stops trial
    division at sqrt(remaining) for speed.
    """
    values = []
    remaining = x
    i = 2
    while i * i <= remaining:
        if remaining % i == 0:
            remaining //= i
            values.append(i)
        else:
            i += 1
    # Whatever is left above sqrt is a single prime factor.
    if remaining > 1:
        values.append(remaining)
    return values


if __name__ == '__main__':
    print(factors(302))
    print(factors(304))
    print(factors(30473456))
| [
"[email protected]"
] | |
5229abb6be00316ff90cd09e352230cb2bc258fe | a2d5681a37be0d3b0753a0e979cb4fa7b0398f32 | /indexedcorpus.py | 84aa2ea572d830a6ae74aed8e35b0c416de90ad2 | [] | no_license | stephenroller/class-nlp-project | f7c09281336985ac55d25e886e7aa180e2225580 | 0362ec1182dc6d3ab54990bbb097339e7bc386a0 | refs/heads/master | 2020-05-29T23:26:56.024802 | 2011-05-13T18:30:49 | 2011-05-13T18:30:49 | 1,606,152 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | #!/usr/bin/evn python
import sqlite3
import os
from itertools import groupby
from util import context_windows
class IndexedCorpus(object):
def __init__(self, indexfile, corpus_directory=''):
self.indexfile = indexfile
self.corpus_directory = corpus_directory
self.conn = sqlite3.connect(indexfile)
def get_unique_words(self):
c = self.conn.cursor()
c.execute('select word from words order by word');
for row in c:
yield row[0]
c.close()
def get_contexts(self, query):
c = self.conn.cursor()
c.execute('''
SELECT F.filename, WA.pos
FROM words AS W
JOIN word_appearances as WA ON (W.id = WA.word)
JOIN filenames AS F ON (WA.file = F.id)
WHERE W.word = ?
ORDER BY WA.file, WA.pos
''',
[query]
)
for filename, positions in groupby(c, lambda x: x[0]):
f = open(os.path.join(self.corpus_directory, filename))
for filename, position in positions:
f.seek(position)
line = f.readline().strip()
yield line
f.close()
c.close()
def __len__(self):
c = self.conn.cursor()
c.execute('select count(*) from words');
for row in c:
count = row[0]
c.close()
return count
| [
"[email protected]"
] | |
72de8eb136efd770ba9db06215d9ea846c6dd7c9 | ceb4ac75c40cd53f24d8f7e0a2f763de309bcfdb | /main4.py | 24ac83b28e32c4c98c305c1e1b012cf1ea9f8cf3 | [] | no_license | kevinelong/bo | c706d0771dbbf427a67d240f552eef4b7529b877 | e08e2d0e07e240cab440733173578f627e0f25ec | refs/heads/master | 2022-11-08T22:18:04.053714 | 2020-07-12T17:17:31 | 2020-07-12T17:17:31 | 279,112,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,184 | py | class Coordinate:
def __init__(self, x, y):
self.x = x
self.y = y
class Size:
def __init__(self, width, height):
self.width = width
self.height = height
class Box:
def __init__(self, origin:Coordinate, box_size:Size):
self.origin = origin
self.size = box_size
class Item:
def __init__(self, name:str, location:Box):
self.name = name
self.location = location if location is not None else Box(Coordinate(0,0), Size(3,3))
def __str__(self):
return f"{self.name} {self.location}"
class World:
def __init__(self):
self.item_list = []
self.bounds = Box(Coordinate(-10,-10), Size(20,20))
def value_at(self,x,y):
pixel = Item("",Box(Coordinate(x,y),Size(1,1)))
for item in self.item_list:
if self.have_collided(pixel,item):
return item.name
return "."
def __str__(self):
rows = []
origin = self.bounds.origin
for r in range(0,self.bounds.size.height):
row = []
for c in range(0,self.bounds.size.width):
row.append(self.value_at(c + origin.x, r + origin.y))
rows.append(" ".join(row))
return "\n".join(rows)
def add_item(self, item):
self.item_list.append(item)
def have_collided(self, item1, item2):
if item1.location.origin.x + item1.location.size.width <= item2.location.origin.x:
return False
if item2.location.origin.x + item2.location.size.width <= item1.location.origin.x:
return False
if item1.location.origin.y + item1.location.size.width <= item2.location.origin.y:
return False
if item2.location.origin.y + item2.location.size.width <= item1.location.origin.y:
return False
return True
def get_collisions(self):
collisions = []
for item1 in self.item_list:
for item2 in self.item_list:
if item1 != item2 and (item2,item1) not in collisions:
if self.have_collided(item1,item2):
collisions.append((item1,item2))
return collisions
debugging = True  # toggle to silence log() output
def log(text):
    """Print text only when the module-level debugging flag is set."""
    if debugging:
        print(text)
w = World()
# Thin module-level shortcuts around the single World instance.
add_item = lambda item: w.add_item(item)
get_collisions = lambda : w.get_collisions()
# TESTS
add_item(Item("A",Box(Coordinate(0,0), Size(3,3))))
add_item(Item("B",Box(Coordinate(-3,-3), Size(4,4))))
print(w)
c = get_collisions()
log(c)
# A covers 0..3 and B covers -3..1 on both axes -> exactly one overlap.
assert( len(c) == 1 )
"[email protected]"
] | |
54b883b64ef60b20fe3d570fc00563c41892ba76 | 0bc8d6abec44e1187499f93803f82514f2b53fc6 | /Base/BaseReq1.py | fa5421d9d89f69e236d3949b433faf8e14ac7258 | [] | no_license | IamBiJav/auto_http_api | 932db2b4f2e1b67f2c0760806afd086494d92007 | 5a7ff01845e43d441fef8ae955b056085ab2dd10 | refs/heads/master | 2023-03-16T22:20:50.102610 | 2021-03-16T13:41:07 | 2021-03-16T13:41:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,054 | py | import requests
import json
import ast
from Base.BaseElementEnmu import Element
from Base.BaseParams import BaseFuzzParams
from Base.BaseStatistics import writeInfo
class Config(object):
    """HTTP API test driver: issues the requests described by a list of
    case dicts and records each outcome via writeInfo().

    Each case item is expected to carry the keys 'protocol', 'url',
    'method', 'mark', and optionally 'params' (a dict literal as a
    string) and 'hope' (expected response fragments joined by '|').
    """
    def __init__(self, sessions):
        # a requests.Session-like object used for every call
        self.sessions = sessions
    def config_req(self, kw):
        """Run each case in `kw` exactly as specified (positive scenario)."""
        app = {}
        header = {"Accept": "*/*", "Content-Type": "application/json;charset=utf-8"}
        for item in kw:
            url = "%s://%s" % (item["protocol"], item["url"])
            print("==请求url:%s" % url)
            print("==请求参数:%s" % item["params"])
            params = "{}"
            if item.get("params"):
                params = item.get("params")
            if item["method"] == "get":
                res = self.sessions.get(url, data=json.dumps(ast.literal_eval(params)), headers=header, verify=False)
            elif item["method"] == "post":
                res = self.sessions.post(url, data=json.dumps(ast.literal_eval(params)), headers=header, verify=False)
            else:
                # NOTE(review): for any other method `res` stays unbound and
                # the lines below raise NameError — confirm intended.
                print("现在只针post和ge方法进行了测试,其他方法请自行扩展")
            app["url"] = item["url"]
            app["method"] = item["method"]
            app["params"] = item["params"]
            app["code"] = str(res.status_code)
            app["msg"] = item["mark"]
            app["hope"] = item.get("hope", "")
            app["res"] = str(res.text)
            app["ress"] = res  # raw response object, handed to the check helper for parsing
            print("==响应结果:%s=" % app["res"])
            app["result"] = self.__check(app["hope"], app["ress"])
            print("==响应码:%s=" % app["code"])
            writeInfo(app, Element.INFO_FILE)
    def config_req_pict(self, kw, req=None):
        """Run each case with fuzzed parameter variants (negative scenario).

        Falls back to config_req() for cases without parameters.
        """
        app = {}
        header = {"Accept": "*/*", "Content-Type": "application/json;charset=utf-8"}
        for item in kw:
            url = "%s://%s" % (item["protocol"], item["url"])
            # Fuzz only when the case has parameters; otherwise run the
            # positive scenario instead.
            if item.get("params"):
                print("进行逆向场景测试")
                params = BaseFuzzParams().param_fi(ast.literal_eval(item["params"]))
                for i in params:
                    _info = ""
                    if i.get("info", "null") != "null":
                        _info = i.get("info", "参数正确")
                        i.pop("info")
                    if item["method"] == "get":
                        res = self.sessions.get(url, data=json.dumps(i), headers=header)
                    else:
                        res = self.sessions.post(url, data=json.dumps(i), headers=header)
                    app["url"] = item["url"]
                    app["method"] = item["method"]
                    app["params"] = str(i)
                    app["code"] = str(res.status_code)
                    app["msg"] = item["mark"] + "_" + _info
                    # app["hope"] = item.get("hope", "")
                    app["hope"] = ""
                    app["res"] = str(res.text)
                    app["result"] = ""
                    print("请求url:%s" % url)
                    print("请求参数:%s" % app["params"])
                    print("响应码:%s" % app["code"])
                    print("响应结果:%s" % app["res"])
                    writeInfo(app, Element.INFO_FILE)
            else:
                self.config_req(kw)
    def __check(self, hope, res):
        """Check every '|'-separated fragment of `hope` appears in the
        JSON-normalised response text; returns a status string."""
        resp = json.dumps(json.loads(res.text), separators=(',', ':'))
        is_check = 0  # 0: no expectation given, not checked; 1: success; -1: failure
        hopes = hope.split("|")
        if len(hopes) and len(hope):
            is_check = 1
            # verify each expected fragment is present in the actual response
            for j in hopes:
                if resp.find(j) == -1:
                    is_check = -1
                    break
        if is_check == 0:
            return "未检查"
        elif is_check == 1:
            return "成功"
        else:
            return "失败"
| [
"[email protected]"
] | |
9db0cb4a0ab5668893f4ed5fcb8d6a4515118cab | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/list_topics_item.py | 941aeb67db97af3db8459de64cdaa873c70458bf | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,993 | py | # coding: utf-8
import re
import six
class ListTopicsItem:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'topic_urn': 'str',
'name': 'str',
'display_name': 'str',
'push_policy': 'int',
'enterprise_project_id': 'str'
}
attribute_map = {
'topic_urn': 'topic_urn',
'name': 'name',
'display_name': 'display_name',
'push_policy': 'push_policy',
'enterprise_project_id': 'enterprise_project_id'
}
def __init__(self, topic_urn=None, name=None, display_name=None, push_policy=None, enterprise_project_id=None):
"""ListTopicsItem - a model defined in huaweicloud sdk"""
self._topic_urn = None
self._name = None
self._display_name = None
self._push_policy = None
self._enterprise_project_id = None
self.discriminator = None
self.topic_urn = topic_urn
self.name = name
self.display_name = display_name
self.push_policy = push_policy
self.enterprise_project_id = enterprise_project_id
@property
def topic_urn(self):
"""Gets the topic_urn of this ListTopicsItem.
Topic的唯一的资源标识。
:return: The topic_urn of this ListTopicsItem.
:rtype: str
"""
return self._topic_urn
@topic_urn.setter
def topic_urn(self, topic_urn):
"""Sets the topic_urn of this ListTopicsItem.
Topic的唯一的资源标识。
:param topic_urn: The topic_urn of this ListTopicsItem.
:type: str
"""
self._topic_urn = topic_urn
@property
def name(self):
"""Gets the name of this ListTopicsItem.
创建topic的名字。
:return: The name of this ListTopicsItem.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ListTopicsItem.
创建topic的名字。
:param name: The name of this ListTopicsItem.
:type: str
"""
self._name = name
@property
def display_name(self):
"""Gets the display_name of this ListTopicsItem.
Topic的显示名,推送邮件消息时,作为邮件发件人显示。
:return: The display_name of this ListTopicsItem.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this ListTopicsItem.
Topic的显示名,推送邮件消息时,作为邮件发件人显示。
:param display_name: The display_name of this ListTopicsItem.
:type: str
"""
self._display_name = display_name
@property
def push_policy(self):
"""Gets the push_policy of this ListTopicsItem.
消息推送的策略,该属性目前不支持修改,后续将支持修改。0表示发送失败,保留到失败队列,1表示直接丢弃发送失败的消息。
:return: The push_policy of this ListTopicsItem.
:rtype: int
"""
return self._push_policy
@push_policy.setter
def push_policy(self, push_policy):
"""Sets the push_policy of this ListTopicsItem.
消息推送的策略,该属性目前不支持修改,后续将支持修改。0表示发送失败,保留到失败队列,1表示直接丢弃发送失败的消息。
:param push_policy: The push_policy of this ListTopicsItem.
:type: int
"""
self._push_policy = push_policy
@property
def enterprise_project_id(self):
"""Gets the enterprise_project_id of this ListTopicsItem.
企业项目ID。
:return: The enterprise_project_id of this ListTopicsItem.
:rtype: str
"""
return self._enterprise_project_id
@enterprise_project_id.setter
def enterprise_project_id(self, enterprise_project_id):
"""Sets the enterprise_project_id of this ListTopicsItem.
企业项目ID。
:param enterprise_project_id: The enterprise_project_id of this ListTopicsItem.
:type: str
"""
self._enterprise_project_id = enterprise_project_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListTopicsItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
7f7621b29075cba866d4c2b7508de19821719201 | 2c6bc39f2adf3731109519bfaf8a3a24ae913834 | /admin/admin/settings.py | 60c38e44516af552aee83c9bf875de446377cff1 | [] | no_license | aliensmart/django-admin | a1289e1a01d64b416f64db1ed435ba23f4c2b8ca | 0732358e4ace57abbf621df66c75b85219226d07 | refs/heads/master | 2022-09-01T15:28:54.664846 | 2020-05-20T20:34:54 | 2020-05-20T20:34:54 | 265,679,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,085 | py | """
Django settings for admin project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and
# load it from the environment before any production use.
SECRET_KEY = 'y*wb35kj$9zpphxs5r)@*t)mer@+zc#6fol0ho29$#cis8r*ai'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'admin.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'admin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Development default: file-based SQLite next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
07cab7f377e53810bca7f3ea6fd25e8f93c45bf2 | ae7884af1ec3965b7c0eec22edad6b74f78b7ba6 | /server/src/uds/core/workers/stats_collector.py | 23a2506832f5b1b824b8e41f3fa32e05c785c451 | [] | no_license | glyptodon/openuds | f4eefa319a3ead827dad999d24e5ee3854d1345d | 3908c875d30ec332490fc8c049bb537e10f10d08 | refs/heads/master | 2021-07-12T20:58:49.281242 | 2021-03-05T22:42:55 | 2021-03-05T22:42:55 | 62,921,174 | 0 | 1 | null | 2016-07-08T22:33:44 | 2016-07-08T22:33:44 | null | UTF-8 | Python | false | false | 4,456 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013-2020 Virtual Cable S.L.U.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
@author: Adolfo Gómez, dkmaster at dkmon dot com
"""
import logging
import typing
from uds.models import ServicePool, Authenticator
from uds.core.util.state import State
from uds.core.util.stats import counters
from uds.core.managers import statsManager
from uds.core.jobs import Job
logger = logging.getLogger(__name__)
class DeployedServiceStatsCollector(Job):
    """
    This Job is responsible for collecting stats for every deployed service every ten minutes
    """
    # NOTE(review): 'frecuency' (sic) appears to be the attribute name the Job
    # framework reads — confirm before renaming.
    frecuency = 599  # Once every ten minutes, 601 is prime, 599 also is prime
    friendly_name = 'Deployed Service Stats'
    def run(self):
        """Record assigned/in-use counters per active service pool and
        user/service counters per authenticator."""
        logger.debug('Starting Deployed service stats collector')
        servicePoolsToCheck: typing.Iterable[ServicePool] = ServicePool.objects.filter(
            state=State.ACTIVE
        ).iterator()
        for servicePool in servicePoolsToCheck:
            try:
                # Assigned user services not in an informational state.
                fltr = servicePool.assignedUserServices().exclude(
                    state__in=State.INFO_STATES
                )
                assigned = fltr.count()
                inUse = fltr.filter(in_use=True).count()
                counters.addCounter(servicePool, counters.CT_ASSIGNED, assigned)
                counters.addCounter(servicePool, counters.CT_INUSE, inUse)
            except Exception:
                # Keep going: a failure on one pool must not abort the rest.
                logger.exception(
                    'Getting counters for service pool %s', servicePool.name
                )
        for auth in Authenticator.objects.all():
            # Users of this authenticator that hold at least one live service.
            fltr = auth.users.filter(userServices__isnull=False).exclude(
                userServices__state__in=State.INFO_STATES
            )
            users = auth.users.all().count()
            users_with_service = fltr.distinct().count()
            number_assigned_services = fltr.count()
            counters.addCounter(auth, counters.CT_AUTH_USERS, users)
            counters.addCounter(
                auth, counters.CT_AUTH_SERVICES, number_assigned_services
            )
            counters.addCounter(
                auth, counters.CT_AUTH_USERS_WITH_SERVICES, users_with_service
            )
        logger.debug('Done Deployed service stats collector')
class StatsCleaner(Job):
    """
    Housekeeping job for the statistics tables: purges old counter and
    event records so the tables stay small.
    """
    frecuency = 3600 * 24 * 15  # executed just once every 15 days
    friendly_name = 'Statistic housekeeping'
    def run(self):
        logger.debug('Starting statistics cleanup')
        # Run each cleanup independently so a failure in one does not
        # prevent the other from running.
        for method_name, label in (('cleanupCounters', 'counters'),
                                   ('cleanupEvents', 'events')):
            try:
                getattr(statsManager(), method_name)()
            except Exception:
                logger.exception('Cleaning up ' + label)
        logger.debug('Done statistics cleanup')
| [
"[email protected]"
] | |
115c7c7c5a07a0ed5e1214fc406d01cf55ee2eef | f1267f4a0fae414f16b2429a5c3b1cbd42df8794 | /lib/Daemon.py | dded7072b2770aaa31cf9b096453386af2a21d63 | [] | no_license | oraant/learn_zabbix_odbm | 3ff3b0318e802ebff9603c8daefdf67cda772b94 | 35a010b5dc0a8bc2989b4d3618f795b08a637063 | refs/heads/master | 2020-12-24T05:46:10.358982 | 2016-03-21T10:25:29 | 2016-03-21T10:25:29 | 73,452,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | # coding:utf-8
import sys,os
class Daemon:
    """Classic UNIX double-fork daemonizer.

    Detaches the current process from its controlling terminal and
    redirects the standard streams to the given files.
    """
    def __init__(self, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
        # BUGFIX: the stderr default was 'dev/null' (a relative path),
        # which would create a local file instead of discarding output.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
    def daemonize(self):
        """Fork the current process into a daemon and redirect the
        standard file descriptors to the configured files."""
        # First fork: return control to the shell by exiting the parent.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)  # first parent exits
        except OSError as e:  # 'as' form is valid on Python 2.6+ and 3
            sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
            sys.exit(1)
        # Detach from the parent environment: new working dir, permissive
        # umask, and a new session (drops the controlling terminal so we
        # no longer receive its SIGHUP/SIGINT).
        os.chdir("/")
        os.umask(0)
        os.setsid()
        # Second fork: the session leader exits so the daemon can never
        # reacquire a controlling terminal by opening one.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)  # second parent exits
        except OSError as e:
            # BUGFIX: the message previously ended with ']n' instead of '\n'.
            sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
            sys.exit(1)
        # Now a daemon: flush and rebind the standard streams.
        # (open() replaces the Python-2-only file() builtin; on Python 2
        # they are the same object.)
        for f in sys.stdout, sys.stderr: f.flush()
        si = open(self.stdin, 'r')
        so = open(self.stdout, 'a+')
        se = open(self.stderr, 'a+', 0)  # unbuffered; NOTE(review): invalid for text mode on Python 3 — use 'ab' there
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
if __name__ == '__main__':
    # Usage: python <script> <logfile> — daemonize with stdout/stderr
    # appended to <logfile>, then idle forever (busy-wait).
    logfile = sys.argv[1]
    d = Daemon('/dev/null',logfile,logfile)
    d.daemonize()
    while(True):
        pass
| [
"[email protected]"
] | |
1258f388ef158ca0387123f39fb49abe83baedb8 | bc01e1d158e7d8f28451a7e108afb8ec4cb7d5d4 | /sage/src/sage/interfaces/giac.py | a144910f149d64f01f0c0f4ac473b19e74454a0b | [] | no_license | bopopescu/geosci | 28792bda1ec1f06e23ba8dcb313769b98f793dad | 0d9eacbf74e2acffefde93e39f8bcbec745cdaba | refs/heads/master | 2021-09-22T17:47:20.194233 | 2018-09-12T22:19:36 | 2018-09-12T22:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,943 | py | r"""
Interface to Giac
(adapted by F. Han from William Stein and Gregg Musiker maple's interface)
You must have the optional Giac interpreter installed
and available as the command ``giac`` in your PATH in
order to use this interface. You need a giac version
supporting "giac --sage" ( roughly after 0.9.1 ). In this case you do not have
to install any optional Sage packages. If giac is not already installed, you can
download binaries or sources or spkg (follow the sources link) from the homepage:
Homepage <http://www-fourier.ujf-grenoble.fr/~parisse/giac.html>
Type ``giac.[tab]`` for a list of all the functions
available from your Giac install. Type
``giac.[tab]?`` for Giac's help about a given
function. Type ``giac(...)`` to create a new Giac
object, and ``giac.eval(...)`` to run a string using
Giac (and get the result back as a string).
If the giac spkg is installed, you should find the full html documentation there::
$SAGE_LOCAL/share/giac/doc/en/cascmd_local/index.html
EXAMPLES::
sage: giac('3 * 5') # optional - giac
15
sage: giac.eval('ifactor(2005)') # optional - giac
'5*401'
sage: giac.ifactor(2005) # optional - giac
2005
sage: l=giac.ifactors(2005) ; l; l[2] # optional - giac
[5,1,401,1]
401
sage: giac.fsolve('x^2=cos(x)+4', 'x','0..5') # optional - giac
[1.9140206190...
sage: giac.factor('x^5 - y^5') # optional - giac
(x-y)*(x^4+x^3*y+x^2*y^2+x*y^3+y^4)
sage: R.<x,y>=QQ[];f=(x+y)^5;f2=giac(f);(f-f2).normal() #optional - giac
0
sage: x,y=giac('x,y'); giac.int(y/(cos(2*x)+cos(x)),x) #random; optional - giac
y*2*((-(tan(x/2)))/6+(-2*1/6/sqrt(3))*ln(abs(6*tan(x/2)-2*sqrt(3))/abs(6*tan(x/2)+2*sqrt(3))))
If the string "error" (case insensitive) occurs in the output of
anything from Giac, a RuntimeError exception is raised.
Tutorial
--------
AUTHORS:
- Gregg Musiker (2006-02-02): initial version.
(adapted to giac by F.Han)
This tutorial is based on the Maple Tutorial for number theory from
http://www.math.mun.ca/~drideout/m3370/numtheory.html.
There are several ways to use the Giac Interface in Sage. We will
discuss two of those ways in this tutorial.
#. If you have a giac expression such as
::
factor( (x^5-1));
We can write that in sage as
::
sage: giac('factor(x^5-1)') # optional - giac
(x-1)*(x^4+x^3+x^2+x+1)
Notice, there is no need to use a semicolon.
#. Since Sage is written in Python, we can also import giac
commands and write our scripts in a pythonic way. For example,
``factor()`` is a giac command, so we can also factor
in Sage using
::
sage: giac('(x^5-1)').factor() # optional - giac
(x-1)*(x^4+x^3+x^2+x+1)
where ``expression.command()`` means the same thing as
``command(expression)`` in Giac. We will use this
second type of syntax whenever possible, resorting to the first
when needed.
::
sage: giac('(x^12-1)/(x-1)').normal() # optional - giac
x^11+x^10+x^9+x^8+x^7+x^6+x^5+x^4+x^3+x^2+x+1
The normal command will reduce a rational function to the
lowest terms. In giac, simplify is slower than normal because it
tries more sophisticated simplifications (ex algebraic extensions)
The factor command will factor a polynomial with
rational coefficients into irreducible factors over the ring of
integers (if your default configuration of giac (cf .xcasrc) has not
allowed square roots). So for example,
::
sage: giac('(x^12-1)').factor( ) # optional - giac
(x-1)*(x+1)*(x^2+1)*(x^2-x+1)*(x^2+x+1)*(x^4-x^2+1)
::
sage: giac('(x^28-1)').factor( ) # optional - giac
(x-1)*(x+1)*(x^2+1)*(x^6-x^5+x^4-x^3+x^2-x+1)*(x^6+x^5+x^4+x^3+x^2+x+1)*(x^12-x^10+x^8-x^6+x^4-x^2+1)
Another important feature of giac is its online help. We can
access this through sage as well. After reading the description of
the command, you can press q to immediately get back to your
original prompt.
Incidentally you can always get into a giac console by the
command.
::
sage: giac.console() # not tested
sage: !giac # not tested
Note that the above two commands are slightly different, and the
first is preferred.
For example, for help on the giac command factors, we type ::
sage: giac.help('factors') # not tested
::
sage: alpha = giac((1+sqrt(5))/2) # optional - giac
sage: beta = giac(1-sqrt(5))/2 # optional - giac
sage: f19 = alpha^19 - beta^19/sqrt(5) # optional - giac
sage: f19 # optional - giac
(sqrt(5)/2+1/2)^19-((-sqrt(5)+1)/2)^19/sqrt(5)
sage: (f19-(5778*sqrt(5)+33825)/5).normal() # optional - giac
0
Let's say we want to write a giac program now that squares a
number if it is positive and cubes it if it is negative. In giac,
that would look like
::
mysqcu := proc(x)
if x > 0 then x^2;
else x^3; fi;
end;
In Sage, we write
::
sage: mysqcu = giac('proc(x) if x > 0 then x^2 else x^3 fi end') # optional - giac
sage: mysqcu(5) # optional - giac
25
sage: mysqcu(-5) # optional - giac
-125
More complicated programs should be put in a separate file and
loaded.
"""
#############################################################################
# Copyright (C) 2005 William Stein <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#############################################################################
from __future__ import print_function
import os
from sage.interfaces.expect import Expect, ExpectElement, ExpectFunction, FunctionElement, gc_disabled
from sage.interfaces.tab_completion import ExtraTabCompletion
import pexpect
from sage.env import DOT_SAGE
from sage.misc.pager import pager
COMMANDS_CACHE = '%s/giac_commandlist_cache.sobj'%DOT_SAGE
class Giac(Expect):
r"""
Interface to the Giac interpreter.
    You must have the optional Giac interpreter installed and available as the command ``giac`` in your PATH in order to use this interface. Try the command: print(giac._install_hints()) for more information on Giac installation.
Type ``giac.[tab]`` for a list of all the functions available from your Giac install.
Type ``giac.[tab]?`` for Giac's help about a given function.
Type ``giac(...)`` to create a new Giac object.
    Full html documentation for giac is available from your giac installation at ``$PREFIX``/share/giac/doc/en/cascmd_en/index.html
EXAMPLES:
Any Giac instruction can be evaluated as a string by the giac command. You can access the giac functions by adding the ``giac.`` prefix to the usual Giac name.
::
sage: l=giac('normal((y+sqrt(2))^4)'); l # optional - giac
y^4+4*sqrt(2)*y^3+12*y^2+8*sqrt(2)*y+4
sage: f=giac('(u,v)->{ if (u<v){ [u,v] } else { [v,u] }}');f(1,2),f(3,1) # optional - giac
([1,2], [1,3])
The output of the giac command is a Giac object, and it can be used for another giac command.
::
sage: l.factors() #optional - giac
[y+sqrt(2),4]
sage: giac('(x^12-1)').factor( ) # optional - giac
(x-1)*(x+1)*(x^2+1)*(x^2-x+1)*(x^2+x+1)*(x^4-x^2+1)
sage: giac('assume(y>0)'); giac('y^2=3').solve('y') #optional - giac
y
...[sqrt(3)]
You can create some Giac elements and avoid many quotes like this:
::
sage: x,y,z=giac('x,y,z');type(y) # optional - giac
<class 'sage.interfaces.giac.GiacElement'>
sage: I1=(1/(cos(2*y)+cos(y))).integral(y,0,pi/4).simplify() #optional - giac
sage: (I1-((-2*ln((sqrt(3)-3*tan(1/8*pi))/(sqrt(3)+3*tan(1/8*pi)))*sqrt(3)-3*tan(1/8*pi))/9)).normal() # optional - giac
0
sage: ((y+z*sqrt(5))*(y-sqrt(5)*z)).normal() # optional - giac
y^2-5*z^2
Polynomials or elements of SR can be evaluated directly by the giac interface.
::
sage: R.<a,b>=QQ[];f=(2+a+b);p=giac.gcd(f^3+5*f^5,f^2+f^5);p;R(p); #optional - giac
a^2+2*a*b+4*a+b^2+4*b+4
a^2 + 2*a*b + b^2 + 4*a + 4*b + 4
    Variable names in Python and Giac are independent.
::
sage: a=sqrt(2);giac('Digits:=30;a:=5');a,giac('a'),giac(a),giac(a).evalf() # optional - giac
30
(sqrt(2), 5, sqrt(2), 1.41421356237309504880168872421)
"""
    def __init__(self, maxread=None, script_subdirectory=None, server=None, server_tmpdir=None, logfile=None):
        """
        Create an instance of the Giac interpreter.

        EXAMPLES::

            sage: giac == loads(dumps(giac)) # optional - giac
            True
        """
        # Spawn ``giac --sage`` through the generic pexpect-based Expect
        # interface; the prompt is the numbered ">>" giac prints in sage mode.
        Expect.__init__(self,
                        name = 'giac',
                        prompt = '[0-9]*>> ',
                        command = "giac --sage",
                        init_code= ['maple_mode(0);I:=i;'], # coercion could be broken in maple_mode
                        script_subdirectory = script_subdirectory,
                        restart_on_ctrlc = False, server = server,
                        server_tmpdir = server_tmpdir,
                        verbose_start = False,
                        logfile = logfile,
                        eval_using_file_cutoff=1000)
    def _function_class(self):
        """
        Return the Python class used to wrap Giac functions.

        EXAMPLES::

            sage: giac._function_class()                 # optional - giac
            <class 'sage.interfaces.giac.GiacFunction'>

        ::

            sage: type(giac.diff)                        # optional - giac
            <class 'sage.interfaces.giac.GiacFunction'>
        """
        return GiacFunction
    def _keyboard_interrupt(self):
        """
        Forward a keyboard interrupt to the running Giac process.

        The pexpect interface for giac has very poor support for keyboard
        interruptions, so we send an explicit Ctrl-C byte to the child,
        wait for the prompt, and then raise.
        """
        print("Interrupting %s..." % self)
        self._expect.sendline(chr(3)) # send ctrl-c
        self._expect.expect(self._prompt)
#        self._expect.expect(self._prompt)
        raise RuntimeError("Ctrl-c pressed while running %s"%self)
    def __reduce__(self):
        """
        Support pickling: unpickling reconstructs the (singleton-style)
        interface via reduce_load_Giac().

        EXAMPLES::

            sage: giac.__reduce__()
            (<function reduce_load_Giac at 0x...>, ())
            sage: f, args = _
            sage: f(*args)
            Giac
        """
        return reduce_load_Giac, tuple([])
def _read_in_file_command(self, filename):
r"""
Returns the string used to read filename into Giac.
EXAMPLES::
sage: giac._read_in_file_command('test') # optional - giac
'read "test"'
::
sage: filename = tmp_filename() # optional - giac
sage: f = open(filename,'w') # optional - giac
sage: f.write('xx := 22;\n') # optional - giac
sage: f.close() # optional - giac
sage: giac.read(filename) # optional - giac
sage: giac.get('xx').strip() # optional - giac
'22'
"""
return 'read "%s"'%filename
def _quit_string(self):
"""
EXAMPLES::
sage: giac._quit_string() # optional - giac
'@d'
::
sage: m = Giac() # optional - giac
sage: a = m(2) # optional - giac
sage: m.is_running() # optional - giac
True
sage: m.quit() # optional - giac
sage: m.is_running() # optional - giac
False
"""
return '@d'
def _install_hints(self):
    """
    Hints for installing Giac on your computer.

    EXAMPLES::

        sage: print(giac._install_hints())
        In order...
    """
    # NOTE: this text is shown verbatim to the user when the giac
    # executable cannot be found, so keep it unchanged.
    return r"""
In order to use the Giac interface you need to have Giac installed
and have a program called "giac" in your PATH. You need a giac version
supporting "giac --sage" ( roughly after 0.9.1 of march 2011). Some giac
instructions and the help's langage depend of you LANG variable. To obtain
inline help for giac commands, you also need to have the program "cas_help"
in your PATH.


If giac is not already installed, you can download binaries or sources
or a spkg ( for the spkg follow the sources link) from the homepage:

Homepage http://www-fourier.ujf-grenoble.fr/~parisse/giac.html


Full html documentation for giac is avaible from your giac installation at:

    ``$PREFIX``/share/giac/doc/en/cascmd_en/index.html

If you got giac from the spkg then ``$PREFIX`` is ``$SAGE_LOCAL``
"""
def expect(self):
    """
    Returns the pexpect object for this Giac session.

    EXAMPLES::

        sage: m = Giac()          # optional - giac
        sage: m.expect() is None  # optional - giac
        True
        sage: m._start()          # optional - giac
        sage: m.expect()          # optional - giac
        Giac with PID ... running .../giac --sage
        sage: m.quit()            # optional - giac
    """
    # None until _start() has spawned the subprocess.
    return self._expect
def console(self):
    """
    Spawn a new Giac command-line session.

    EXAMPLES::

        sage: giac_console()  # not tested - giac
        ...
        Homepage http://www-fourier.ujf-grenoble.fr/~parisse/giac.html
        Released under the GPL license 3.0 or above
        See http://www.gnu.org for license details
        -------------------------------------------------
        Press CTRL and D simultaneously to finish session
        Type ?commandname for help
        0>>
    """
    giac_console()
def completions(self, s):
    """
    Return all commands that complete the command starting with the
    string s.

    EXAMPLES::

        sage: c = giac.completions('cas')  # optional - giac
        sage: 'cas_setup' in c             # optional - giac
        True
    """
    if self._expect is None:
        self._start()
    E = self._expect
    # Send "<s>?<CR>" (chr(63) is '?', chr(13) is carriage return):
    # giac answers with a completion list delimited by '----'.
    E.sendline('%s%s%s'%(s,chr(63),chr(13)))
    t = E.timeout
    E.timeout=0.3 # since some things have no completion
    try:
        E.expect('----')
    except pexpect.TIMEOUT:
        # No completion produced; restore the timeout before returning.
        E.timeout = t
        return []
    E.timeout = t
    v = E.before
    # Two prompts are emitted after the completion list; consume both
    # to leave the session in sync.
    E.expect(self._prompt)
    E.expect(self._prompt)
    return v.split()[1:]
def _commands(self):
    """
    Return list of all commands defined in Giac.

    EXAMPLES::

        sage: c = giac._commands()  # optional - giac
        sage: len(c) > 100          # optional - giac
        True
        sage: 'Psi' in c            # optional - giac
        True
    """
    try:
        # Ask giac for the completions of every upper-case letter
        # (A-Z), then every lower-case letter (a-z), and merge.
        letters = [chr(65 + n) for n in range(26)] + [chr(97 + n) for n in range(26)]
        v = []
        for letter in letters:
            v.extend(self.completions(letter))
    except RuntimeError:
        print("\n" * 3)
        print("*" * 70)
        print("WARNING: You do not have a working version of Giac installed!")
        print("*" * 70)
        v = []
    v.sort()
    return v
def _tab_completion(self, verbose=True, use_disk_cache=True):
    """
    Returns a list of all the commands defined in Giac and optionally
    (per default) store them to disk.

    EXAMPLES::

        sage: c = giac._tab_completion(use_disk_cache=False, verbose=False)  # optional - giac
        sage: len(c) > 100   # optional - giac
        True
        sage: 'factors' in c # optional - giac
        True
    """
    try:
        # Cached in memory after the first successful call.
        return self.__tab_completion
    except AttributeError:
        import sage.misc.persist
        if use_disk_cache:
            try:
                self.__tab_completion = sage.misc.persist.load(COMMANDS_CACHE)
                return self.__tab_completion
            except IOError:
                # No cache file yet; fall through and rebuild it.
                pass
        if verbose:
            print("\nBuilding Giac command completion list (this takes")
            print("a few seconds only the first time you do it).")
            print("To force rebuild later, delete %s." % COMMANDS_CACHE)
        v = self._commands()
        self.__tab_completion = v
        if len(v) > 200:
            # Giac is actually installed.
            sage.misc.persist.save(v, COMMANDS_CACHE)
        return v
def cputime(self, t=None):
    r"""
    Return the amount of CPU time the Giac session has used, or, if
    ``t`` is given, the difference between the current CPU time and
    ``t``.

    EXAMPLES::

        sage: t = giac.cputime()  # optional - giac
        sage: t                   # random; optional - giac
        0.02
        sage: x = giac('x')       # optional - giac
        sage: giac.diff(x^2, x)   # optional - giac
        2*x
        sage: giac.cputime(t)     # random; optional - giac
        0.0
    """
    if t is None:
        return float(self('time()'))
    return float(self('time() - %s' % float(t)))
def _eval_line(self, line, allow_use_file=True, wait_for_prompt=True, restart_if_needed=False):
    """
    Evaluate one line of giac code over the pexpect link.

    ``restart_if_needed`` is accepted for interface compatibility but
    is not forwarded to ``Expect._eval_line``.

    EXAMPLES::

        sage: giac._eval_line('2+2')  # optional - giac
        '4'

        sage: A=matrix([range(280)])  # optional - giac
        sage: GA=giac(A)              # optional - giac
    """
    with gc_disabled():
        z = Expect._eval_line(self, line, allow_use_file=allow_use_file,
                              wait_for_prompt=wait_for_prompt)
        # Giac reports failures inline, so scan the output for "error".
        if z.lower().find("error") != -1:
            raise RuntimeError("An error occurred running a Giac command:\nINPUT:\n%s\nOUTPUT:\n%s"%(line, z))
        return z
def eval(self, code, strip=True, **kwds):
    r"""
    Send ``code`` to the Giac interpreter and return its output.

    To enable multi-line code in the notebook magic mode ``%giac``,
    all ``\n`` characters are removed from ``code`` before sending,
    unless ``strip`` is False.

    INPUT:

    - ``code`` -- str

    - ``strip`` -- default is True and removes ``\n``

    EXAMPLES::

        sage: giac.eval("2+2;\n3")         # optional - giac
        '4,3'
        sage: giac.eval("2+2;\n3",False)   # optional - giac
        '4\n3'
        sage: s='g(x):={\nx+1;\nx+2;\n}'   # optional - giac
        sage: giac(s)                      # optional - giac
        (x)->{
        x+1;
        x+2;
        }
        sage: giac.g(5)                    # optional - giac
        7
    """
    if strip:
        code = code.replace("\n", "").strip()
    return Expect.eval(self, code, strip=strip, **kwds).strip()
def set(self, var, value):
    """
    Assign ``value`` to the giac variable ``var``.

    EXAMPLES::

        sage: giac.set('xx', '2')  # optional - giac
        sage: giac.get('xx')       # optional - giac
        '2'
    """
    # The trailing ':;' suppresses echoing of the assigned value
    # (valid because giac is not in maple mode, cf. maple_mode(0)).
    cmd = '%s:=%s:;' % (var, value)
    out = self.eval(cmd)
    if "error" in out:
        raise TypeError("Error executing code in Giac\nCODE:\n\t%s\nGiac ERROR:\n\t%s"%(cmd, out))
def get(self, var):
    """
    Return the value of the giac variable ``var`` as a string.

    EXAMPLES::

        sage: giac.set('xx', '2')  # optional - giac
        sage: giac.get('xx')       # optional - giac
        '2'
    """
    return self.eval('%s' % var)
def _object_class(self):
    """
    Returns the class of GiacElements.

    EXAMPLES::

        sage: giac._object_class()
        <class 'sage.interfaces.giac.GiacElement'>

    ::

        sage: m = giac(2)  # optional - giac
        sage: type(m)      # optional - giac
        <class 'sage.interfaces.giac.GiacElement'>
    """
    return GiacElement
def _function_element_class(self):
    """
    Returns the GiacFunctionElement class (wrapper for bound methods
    such as ``two.gcd``).

    EXAMPLES::

        sage: giac._function_element_class()
        <class 'sage.interfaces.giac.GiacFunctionElement'>

    ::

        sage: two = giac(2)   # optional - giac
        sage: type(two.gcd)   # optional - giac
        <class 'sage.interfaces.giac.GiacFunctionElement'>
    """
    return GiacFunctionElement
def _equality_symbol(self):
    """
    Returns the symbol used for equality testing in Giac.

    EXAMPLES::

        sage: giac._equality_symbol()  # optional - giac
        '=='

        sage: giac(2) == giac(2)       # optional - giac
        True
    """
    return '=='
def _true_symbol(self):
    """
    Returns the symbol used for truth in Giac (giac prints booleans
    as 1/0).

    EXAMPLES::

        sage: giac._true_symbol()
        '1'

    ::

        sage: giac(2) == giac(2)  # optional - giac
        True
    """
    return '1'
def _assign_symbol(self):
    """
    Returns the symbol used for assignment in Giac.

    EXAMPLES::

        sage: giac._assign_symbol()
        ':='
    """
    return ":="
def _help(self, str):
    r"""
    Returns the Giac help on ``str``.

    The text is produced by the external ``cas_help`` program, so the
    language of the output depends on the LANG environment variable.

    EXAMPLES::

        sage: giac._help('gcd')  # not tested ; output may vary (LANG)
        "...gcd - greatest common divisor of polynomials...
    """
    # NOTE: the parameter name ``str`` shadows the builtin; kept as-is
    # for backward compatibility.
    return os.popen('cas_help %s'%str).read()
    # return os.popen('echo "?%s" | giac'%str).read()
def help(self, str):
    """
    Display Giac help about str. This is the same as typing "?str" in
    the Giac console.

    INPUT:

    - ``str`` - a string to search for in the giac help system

    EXAMPLES::

        sage: giac.help('Psi')  # not tested - depends of giac and $LANG
        Psi(a,n)=nth-derivative of the function DiGamma (=ln@Gamma) at point a (Psi(a,0)=Psi(a))...
    """
    pager()(self._help(str))
def clear(self, var):
    """
    Clear the variable named var.

    EXAMPLES::

        sage: giac.set('xx', '2')  # optional - giac
        sage: giac.get('xx')       # optional - giac
        '2'
        sage: giac.clear('xx')     # optional - giac
        sage: giac.get('xx')       # optional - giac
        'xx'
    """
    # giac's purge() unbinds the name, so afterwards it evaluates to itself.
    self.eval('purge(%s)'%var)
def version(self):
    """
    Wrapper for giac's version().

    EXAMPLES::

        sage: giac.version()  # optional - giac
        "giac...
    """
    return giac('version()')
class GiacFunction(ExpectFunction):
    # Wrapper for a top-level Giac function, e.g. ``giac.gcd``.

    def _sage_doc_(self):
        """
        Returns the Giac help for this function. This gets called when
        doing "?" on self.

        EXAMPLES::

            sage: giac.gcd._sage_doc_()  # not tested ; output may vary LANG
            "gcd - greatest common divisor of polynomials...
        """
        M = self._parent
        return M._help(self._name)
class GiacFunctionElement(FunctionElement):
    # Wrapper for a Giac function bound to an element, e.g. ``two.gcd``.

    def _sage_doc_(self):
        """
        Returns the Giac help for this function. This gets called when
        doing "?" on self.

        EXAMPLES::

            sage: two = giac(2)           # optional - giac
            sage: two.gcd._sage_doc_()    # not tested; output may vary LANG
            "...gcd - greatest common divisor of polynomials...
        """
        return self._obj.parent()._help(self._name)
class GiacElement(ExpectElement):
    # A single value living in the giac session.  Most methods delegate
    # to the giac subprocess through the global ``giac`` interface.

    def __float__(self):
        """
        Returns a floating point version of self.

        EXAMPLES::

            sage: float(giac(1/2))  # optional - giac
            0.5
            sage: type(_)           # optional - giac
            <type 'float'>
        """
        return float(giac.eval('evalf(%s)' % self.name()))

    def unapply(self, var):
        """
        Creates a Giac function in the given arguments from a Giac symbol.

        EXAMPLES::

            sage: f=giac('y^3+1+t')     # optional - giac
            sage: g=(f.unapply('y,t'))  # optional - giac
            sage: g                     # optional - giac
            (y,t)->y^3+1+t
            sage: g(1,2)                # optional - giac
            4
        """
        return giac('unapply(%s,%s)'%(self,var))

    def __hash__(self):
        """
        Returns a integer representing the hash of self.

        These examples are optional, and require Giac to be installed. You
        don't need to install any Sage packages for this.

        EXAMPLES::

            sage: m = giac('x^2+y^2')  # optional - giac
            sage: hash(m)              # random; optional - giac
            4614285348919569149
        """
        # Hash the canonical giac string form so equal values hash equally.
        return hash(giac.eval('string(%s);'%self.name()))

    def __cmp__(self, other):
        """
        Compare equality between self and other, using giac.

        These examples are optional, and require Giac to be installed. You
        don't need to install any Sage packages for this.

        NOTE(review): ``__cmp__`` is the Python-2 comparison protocol and is
        ignored under Python 3 -- confirm rich comparisons are supplied by
        the parent class before relying on ordering there.

        EXAMPLES::

            sage: a = giac(5)  # optional - giac
            sage: b = giac(5)  # optional - giac
            sage: a == b       # optional - giac
            True
            sage: a == 5       # optional - giac
            True

        ::

            sage: c = giac(3)  # optional - giac
            sage: a == c       # optional - giac
            False
            sage: a < c        # optional - giac
            False
            sage: a < 6        # optional - giac
            True
            sage: c <= a       # optional - giac
            True

        TESTS::

            sage: x = var('x')        # optional - giac
            sage: t = giac((x+1)^2)   # optional - giac
            sage: u = giac(x^2+2*x+1) # optional - giac
            sage: u == t              # optional - giac
            False
        """
        P = self.parent()
        if P.eval("evalb(%s %s %s)"%(self.name(), P._equality_symbol(),
                                     other.name())) == P._true_symbol():
            return 0
        # (to be tested with giac). Maple does not allow comparing objects
        # of different types and it raises an error in this case.
        # We catch the error, and return True for <
        try:
            if P.eval("evalb(%s %s %s)"%(self.name(), P._lessthan_symbol(), other.name())) == P._true_symbol():
                return -1
        except RuntimeError as e:
            msg = str(e)
            if 'is not valid' in msg and 'to < or <=' in msg:
                # Incomparable in giac: fall back to an arbitrary but
                # deterministic order based on the string hashes.
                if (hash(str(self)) < hash(str(other))):
                    return -1
                else:
                    return 1
            else:
                raise RuntimeError(e)
        if P.eval("evalb(%s %s %s)"%(self.name(), P._greaterthan_symbol(), other.name())) == P._true_symbol():
            return 1
        # everything is supposed to be comparable in Python, so we define
        # the comparison thus when no comparable in interfaced system.
        if (hash(self) < hash(other)):
            return -1
        else:
            return 1

    def _tab_completion(self):
        """
        EXAMPLES::

            sage: a = giac(2)                   # optional - giac
            sage: 'sin' in a._tab_completion()  # optional - giac
            True
        """
        return self.parent()._tab_completion()

    def __len__(self):
        """
        EXAMPLES::

            sage: len(giac([1,2,3]))  # optional - giac
            3
        """
        return int(self.size())

    def __iter__(self):
        """
        EXAMPLES::

            sage: l = giac([1,2,3])  # optional - giac
            sage: list(iter(l))      # optional - giac
            [1, 2, 3]
        """
        for i in range(len(self)): # zero-indexed if giac is maple_mode(0)
            yield self[i]

    def __del__(self):
        """
        Note that clearing object is pointless since it wastes time.
        (Ex: otherwise doing a=0 after a = (giac('x+y+z')^40).normal() is very slow )

        EXAMPLES::

            sage: a = giac(2)  # optional - giac
            sage: a.__del__()  # optional - giac
            sage: a            # optional - giac
            2
            sage: del a        # optional - giac
            sage: a
            Traceback (most recent call last):
            ...
            NameError: name 'a' is not defined
        """
        # Deliberately a no-op: purging names in giac costs more than leaking them.
        return

    def __repr__(self):
        """
        Return a string representation of self.

        These examples are optional, and require Giac to be installed. You
        don't need to install any Sage packages for this.

        EXAMPLES::

            sage: x = var('x')
            sage: giac(x)  # optional - giac
            x
            sage: giac(5)  # optional - giac
            5
            sage: M = matrix(QQ,2,range(4))  # optional - giac
            sage: giac(M)  # optional - giac
            [[0,1],[2,3]]
        """
        self._check_valid()
        return self.parent().get(self._name)

    def _latex_(self):
        r"""
        You can output Giac expressions in latex.

        EXAMPLES::

            sage: print(latex(giac('(x^4 - y)/(y^2-3*x)')))  # optional - giac
            "\frac{(x^{4}-y)}{(y^{2}-3\cdot x)}"
        """
        return self.parent().eval('latex(%s)'%self.name())

    def _matrix_(self, R):
        r"""
        Return matrix over the (Sage) ring R determined by self, where self
        should be a Giac matrix.

        Warning: It is slow, don't convert big matrices.

        EXAMPLES::

            sage: R.<x,y>=QQ[]  # optional - giac
            sage: M=giac('matrix(4,4,(k,l)->(x^k-y^l))'); M  # optional - giac
            matrix[[0,1-y,1-y^2,1-y^3],[x-1,x-y,x-y^2,x-y^3],[x^2-1,x^2-y,x^2-y^2,x^2-y^3],[x^3-1,x^3-y,x^3-y^2,x^3-y^3]]
            sage: M.eigenvals()  # random; optional - giac
            0,0,(x^3+x^2+x-y^3-y^2-y+sqrt(x^6+2*x^5+3*x^4-14*x^3*y^3+2*x^3*y^2+2*x^3*y+6*x^3+2*x^2*y^3-14*x^2*y^2+2*x^2*y+5*x^2+2*x*y^3+2*x*y^2-14*x*y+4*x+y^6+2*y^5+3*y^4+6*y^3+5*y^2+4*y-12))/2,(x^3+x^2+x-y^3-y^2-y-sqrt(x^6+2*x^5+3*x^4-14*x^3*y^3+2*x^3*y^2+2*x^3*y+6*x^3+2*x^2*y^3-14*x^2*y^2+2*x^2*y+5*x^2+2*x*y^3+2*x*y^2-14*x*y+4*x+y^6+2*y^5+3*y^4+6*y^3+5*y^2+4*y-12))/2
            sage: Z=matrix(R,M);Z  # optional - giac
            [ 0 -y + 1 -y^2 + 1 -y^3 + 1]
            [ x - 1 x - y -y^2 + x -y^3 + x]
            [ x^2 - 1 x^2 - y x^2 - y^2 -y^3 + x^2]
            [ x^3 - 1 x^3 - y x^3 - y^2 x^3 - y^3]
            sage: parent(Z)  # optional - giac
            Full MatrixSpace of 4 by 4 dense matrices over Multivariate Polynomial Ring in x, y over Rational Field
        """
        v = self.dim()
        n = int(v[0])
        m = int(v[1])
        from sage.matrix.matrix_space import MatrixSpace
        M = MatrixSpace(R, n, m)
        # One giac round-trip per entry -- this is why conversion is slow.
        entries = [[R(self[r, c]) for c in range(m)] for r in range(n)]
        return M(entries)

    def _sage_(self):
        r"""
        Convert a giac expression back to a Sage expression.

        This currently does not implement a parser for the Giac output language,
        therefore only very simple expressions will convert successfully.

        Warning: List conversion is slow.

        EXAMPLE::

            sage: m = giac('x^2 + 5*y')  # optional - giac
            sage: m.sage()               # optional - giac
            x^2 + 5*y

        ::

            sage: m = giac('sin(2*sqrt(1-x^2)) * (1 - cos(1/x))^2')  # optional - giac
            sage: m.trigexpand().sage()  # optional - giac
            2*cos(sqrt(-x^2 + 1))*cos(1/x)^2*sin(sqrt(-x^2 + 1)) - 4*cos(sqrt(-x^2 + 1))*cos(1/x)*sin(sqrt(-x^2 + 1)) + 2*cos(sqrt(-x^2 + 1))*sin(sqrt(-x^2 + 1))
        """
        result = repr(self)
        if str(self.type()) != 'DOM_LIST' :
            try:
                # Delegate parsing of scalar expressions to the symbolic ring.
                from sage.symbolic.all import SR
                return SR(result)
            except Exception:
                raise NotImplementedError("Unable to parse Giac output: %s" % result)
        else:
            # Lists are converted entry by entry (slow).
            return [entry.sage() for entry in self]

    def integral(self, var='x', min=None, max=None):
        r"""
        Return the integral of self with respect to the variable x.

        INPUT:

        - ``var`` - variable

        - ``min`` - default: None

        - ``max`` - default: None

        Returns the definite integral if xmin is not None, otherwise
        returns an indefinite integral.

        EXAMPLES::

            sage: y=giac('y');f=(sin(2*y)/y).integral(y).simplify(); f  # optional - giac
            Si(2*y)
            sage: f.diff(y).simplify()  # optional - giac
            sin(2*y)/y

        ::

            sage: f = giac('exp(x^2)').integral('x',0,1) ; f  # optional - giac
            1.46265174...
            sage: x,y=giac('x'),giac('y');integrate(cos(x+y),'x=0..pi').simplify()  # optional - giac
            -2*sin(y)
        """
        if min is None:
            # NOTE: ``max`` is silently ignored in this branch.
            return giac('int(%s,%s)'%(self.name(),var))
        else:
            if max is None:
                raise ValueError("neither or both of min/max must be specified.")
            return giac('int(%s,%s,%s,%s)'%(self.name(),var,giac(min),giac(max)))

    # Alias so that both spellings work on elements.
    integrate = integral

    def sum(self, var, min=None, max=None):
        r"""
        Return the sum of self with respect to the variable x.

        INPUT:

        - ``var`` - variable

        - ``min`` - default: None

        - ``max`` - default: None

        Returns the definite sum if min is not None, otherwise
        returns an indefinite sum.

        EXAMPLES::

            sage: giac('1/(1+k^2)').sum('k',-oo,+infinity).simplify()  # optional - giac
            (pi*exp(pi)^2+pi)/(exp(pi)^2-1)
        """
        if min is None:
            return giac('sum(%s,%s)'%(self.name(),var))
        else:
            if max is None:
                raise ValueError("neither or both of min/max must be specified.")
            return giac('sum(%s,%s,%s,%s)'%(self.name(),var,giac(min),giac(max)))
# The unique global Giac interpreter instance used throughout Sage.
giac = Giac()
def reduce_load_Giac():
    """
    Returns the giac object created in sage.interfaces.giac.

    Used by ``Giac.__reduce__`` so that unpickling always yields the
    unique global interface instance.

    EXAMPLES::

        sage: from sage.interfaces.giac import reduce_load_Giac
        sage: reduce_load_Giac()
        Giac
    """
    return giac
def giac_console():
    """
    Spawn a new Giac command-line session.

    EXAMPLES::

        sage: giac.console()  # not tested - giac
        ...
        Homepage http://www-fourier.ujf-grenoble.fr/~parisse/giac.html
        Released under the GPL license 3.0 or above
        See http://www.gnu.org for license details
        -------------------------------------------------
        Press CTRL and D simultaneously to finish session
        Type ?commandname for help
    """
    from sage.repl.rich_output.display_manager import get_display_manager
    # An interactive console only makes sense in a real terminal.
    if not get_display_manager().is_in_terminal():
        raise RuntimeError('Can use the console only in the terminal. Try %%giac magics instead.')
    os.system('giac')
def __doctest_cleanup():
    """
    Quit all running interface subprocesses; used to clean up after
    doctests.

    EXAMPLES::

        sage: from sage.interfaces.giac import __doctest_cleanup
        sage: m = giac(2)         # optional - giac
        sage: giac.is_running()   # optional - giac
        True
        sage: __doctest_cleanup()
        sage: giac.is_running()
        False
    """
    import sage.interfaces.quit
    sage.interfaces.quit.expect_quitall()
| [
"valber@HPC"
] | valber@HPC |
f41facc51474c9c8b75bdf9eb8cbff2452c343ac | f409f0b5be2bccdc76041a308b28964b00565c2b | /untitled/urls.py | 93f59be44255ae6fefe35db65a6c61417a4d3618 | [] | no_license | yingliufengpeng/demo_django_blog | b9df1e9176ffd66fe9cf6b8fcbad34092aaa8c53 | 27b3e88ebc7e84f8b4d2a8844abd35104bec2bdb | refs/heads/master | 2021-01-17T07:50:52.081607 | 2017-06-26T18:48:56 | 2017-06-26T18:48:56 | 95,317,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,704 | py | """untitled URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.views import static
from demo import views
urlpatterns = [
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # Serve user-uploaded media files from MEDIA_ROOT.
    url(r"^uploads/(?P<path>.*)$", static.serve, {"document_root": settings.MEDIA_ROOT}),
    # Authentication and registration.
    url(r"^login/", views.login, name='login'),
    url(r"^logout/", views.logout, name='logout'),
    url(r"^register/", views.register, name='register'),
    # Main site pages.
    url(r"^index/", views.index, name='index'),
    url(r"^home/", views.home, name='home'),
    # Article pages.
    url(r"^article/", views.article, name='article'),
    url(r"^add_article/", views.add_article, name='add_article'),
    url(r"^upload_img/", views.upload_img, name='upload_img'),
    # AJAX endpoints for article CRUD.
    url(r"^article_ajax_add/", views.article_ajax_add, name='article_ajax_add'),
    url(r"^modify_article/", views.modify_article, name='modify_article'),
    url(r"^article_ajax_modify/", views.article_ajax_modify, name='article_ajax_modify'),
    url(r"^article_ajax_delete/", views.article_ajax_delete, name='article_ajax_delete'),
]
| [
"[email protected]"
] | |
c2113be94bd6ef86abbc7380563b0a18cabd088f | f45cc0049cd6c3a2b25de0e9bbc80c25c113a356 | /LeetCode/动态规划法(dp)/背包问题/474. 一和零.py | ee1171e6057672507a105886d84a225938f263c0 | [] | no_license | yiming1012/MyLeetCode | 4a387d024969bfd1cdccd4f581051a6e4104891a | e43ee86c5a8cdb808da09b4b6138e10275abadb5 | refs/heads/master | 2023-06-17T06:43:13.854862 | 2021-07-15T08:54:07 | 2021-07-15T08:54:07 | 261,663,876 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,788 | py | """
474. 一和零
给你一个二进制字符串数组 strs 和两个整数 m 和 n 。
请你找出并返回 strs 的最大子集的大小,该子集中 最多 有 m 个 0 和 n 个 1 。
如果 x 的所有元素也是 y 的元素,集合 x 是集合 y 的 子集 。
示例 1:
输入:strs = ["10", "0001", "111001", "1", "0"], m = 5, n = 3
输出:4
解释:最多有 5 个 0 和 3 个 1 的最大子集是 {"10","0001","1","0"} ,因此答案是 4 。
其他满足题意但较小的子集包括 {"0001","1"} 和 {"10","1","0"} 。{"111001"} 不满足题意,因为它含 4 个 1 ,大于 n 的值 3 。
示例 2:
输入:strs = ["10", "0", "1"], m = 1, n = 1
输出:2
解释:最大的子集是 {"0", "1"} ,所以答案是 2 。
提示:
1 <= strs.length <= 600
1 <= strs[i].length <= 100
strs[i] 仅由 '0' 和 '1' 组成
1 <= m, n <= 100
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/ones-and-zeroes
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from typing import List
class Solution:
    def findMaxForm1(self, strs: List[str], m: int, n: int) -> int:
        """Size of the largest subset of ``strs`` with at most ``m`` zeros
        and ``n`` ones.

        Three-dimensional DP: dp[i][j][k] is the best subset size using
        only the first i strings with a budget of j zeros and k ones.
        """
        count = len(strs)
        dp = [[[0] * (n + 1) for _ in range(m + 1)] for _ in range(count + 1)]
        for i in range(1, count + 1):
            zeros = strs[i - 1].count('0')
            ones = strs[i - 1].count('1')
            for j in range(m + 1):
                for k in range(n + 1):
                    # Option 1: skip string i-1.
                    best = dp[i - 1][j][k]
                    # Option 2: take it, if the budgets allow.
                    if j >= zeros and k >= ones:
                        with_it = dp[i - 1][j - zeros][k - ones] + 1
                        if with_it > best:
                            best = with_it
                    dp[i][j][k] = best
        return dp[-1][-1][-1]

    def findMaxForm2(self, strs: List[str], m: int, n: int) -> int:
        """Same answer as ``findMaxForm1`` with the string dimension
        compressed away (classic 0/1-knapsack rolling array).

        Budgets are swept downwards so each string is counted at most once.
        """
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        for s in strs:
            zeros = s.count('0')
            ones = s.count('1')
            for j in range(m, zeros - 1, -1):
                for k in range(n, ones - 1, -1):
                    dp[j][k] = max(dp[j][k], dp[j - zeros][k - ones] + 1)
        return dp[-1][-1]
if __name__ == '__main__':
    # Sample from the problem statement; both variants should print 4.
    strs = ["10", "0001", "111001", "1", "0"]
    m = 5
    n = 3
    print(Solution().findMaxForm1(strs, m, n))
    print(Solution().findMaxForm2(strs, m, n))
| [
"[email protected]"
] | |
56f16db5640a5744b67e7f88a950990ad72782a6 | 21b0b4c27193898207751c91b8b2ed168a1b1638 | /py/py_0383_divisibility_comparison_between_factorials.py | 6e66399ac758ee89f0245e09912ace51ce300130 | [
"MIT"
] | permissive | lcsm29/project-euler | 67560a4e66968f1671a3d7ecf2dda6c956893dca | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | refs/heads/main | 2023-07-04T11:45:24.374841 | 2021-08-07T08:20:41 | 2021-08-07T08:20:41 | 371,808,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | # Solution of;
# Project Euler Problem 383: Divisibility comparison between factorials
# https://projecteuler.net/problem=383
#
# Let f5(n) be the largest integer x for which 5x divides n. For example,
# f5(625000) = 7. Let T5(n) be the number of integers i which satisfy
# f5((2·i-1)!) < 2·f5(i!) and 1 ≤ i ≤ n. It can be verified that T5(103) = 68
# and T5(109) = 2408210. Find T5(1018).
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder solver for Project Euler problem 383; not implemented yet."""
    return None
if __name__ == '__main__':
    n = 1000        # problem-size argument forwarded to the solver
    i = 10000       # iteration count used by the timing harness
    prob_id = 383   # Project Euler problem id
    timed.caller(dummy, n, i, prob_id)
| [
"[email protected]"
] | |
da31943f12cab72657cccbf301ca3e51137991fa | 6b29d66ba7927129b68bc00db769f0edf1babaea | /SoftLayer/CLI/mq/endpoints_list.py | 179663919c224900057d00eea255084ae140b781 | [
"MIT"
] | permissive | tdurden82/softlayer-python | 65f42923c347a164995dfc267829721032de261d | 0eed20fa4adedd3228d91d929bb8befb1e445e49 | refs/heads/master | 2021-01-17T10:01:48.087450 | 2015-10-19T18:38:53 | 2015-10-19T18:38:53 | 46,301,339 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | """List SoftLayer Message Queue Endpoints."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
import click
@click.command()
@environment.pass_env
def cli(env):
"""List SoftLayer Message Queue Endpoints."""
manager = SoftLayer.MessagingManager(env.client)
regions = manager.get_endpoints()
table = formatting.Table(['name', 'public', 'private'])
for region, endpoints in regions.items():
table.add_row([
region,
endpoints.get('public') or formatting.blank(),
endpoints.get('private') or formatting.blank(),
])
env.fout(table)
| [
"[email protected]"
] | |
92916397d8bf8d6741c6ac3a5ea1959e5458d171 | 4d87e41fa51a3f777512982553b9bf4f32325c2f | /Scripts/pip3-script.py | 7e22278ba12539d9a302792add86e495297ccf05 | [] | no_license | Leno1993/RecommendSystem | 75bc8a045abbd83a127133cac80feb3149ce2802 | c97126126e86dd309804aa7b5da8df62b6491472 | refs/heads/master | 2020-05-09T12:59:28.410270 | 2019-03-24T13:53:48 | 2019-03-24T13:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | #!D:\PycharmWorkSpace\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing '-script.py(w)'/'.exe' suffix so pip sees its own name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
    )
| [
"[email protected]"
] | |
6cd99aa856870945383ad551d176b967727db0ba | 4851d160a423b4a65e81a75d5b4de5218de958ee | /Pig Sursurunga.py | cd4d4594217fdde6c6d1df1dd137ceb730f8f010 | [] | no_license | LarisaOvchinnikova/python_codewars | 519508e5626303dcead5ecb839c6d9b53cb3c764 | 5399f4be17e4972e61be74831703a82ce9badffd | refs/heads/master | 2023-05-05T14:52:02.100435 | 2021-05-25T18:36:51 | 2021-05-25T18:36:51 | 319,399,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | https://www.codewars.com/kata/5536aba6e4609cc6a600003d
import re
def sursurungal(txt):
arr = re.split(r'(\W+)', txt)
res = []
i = 0
while i < len(arr):
if arr[i].isdigit():
n = int(arr[i])
if n in [0,1]:
res.append(f"{arr[i]} {arr[i+2]}")
else:
word = arr[i+2]
word = word[:-1]
if n == 2: res.append(f"{n} bu{word}")
if 3<=n<=9: res.append(f"{n} {word}zo")
if n>=10: res.append(f"{n} ga{word}ga")
i+=3
else:
res.append(arr[i])
i+=1
return "".join(res) | [
"[email protected]"
] | |
8865db146159f578256de4ca7df771ec7049b312 | d2f4eb41c95e35a21c257554efbaf18a557d4f4a | /KneiborsClassfier.py | 9ebdbadd0a59fb28173de4d19d2b21347c5b7885 | [
"Apache-2.0"
] | permissive | madcow2021/Insect_Identification | 1d7fbf5ce4a5d72d4994e5af2078701787eb08b4 | ae9e30c09f47b343664b3cb18e893fedcd84b335 | refs/heads/master | 2022-02-03T22:31:17.108726 | 2019-06-05T01:34:46 | 2019-06-05T01:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | # coding=utf-8
import pandas as pd
# 创建特征列表
column_names = ['P_rect', 'P_extend', 'P_spherical', 'P_leaf', 'P_circle', 'Species']
# column_names = ['P_rect', 'P_extend', 'P_spherical', 'P_leaf', 'P_circle','P_complecate', 'Species']
data = pd.read_csv('data/data.csv', names=column_names)
# print data.shape
# 这个功能快要被抛弃了,分割训练和测试集
from sklearn.cross_validation import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(data[column_names[0:5]], data[column_names[5]], test_size=0.25,
random_state=33)
# print Y_train.value_counts()
# print Y_test.value_counts()
# 数据整理,但是整形的,需要注意
# from sklearn.preprocessing import StandardScaler
# ss = StandardScaler()
# X_train = ss.fit_transform(X_train)
# X_test = ss.transform(X_test)
from sklearn.neighbors import KNeighborsClassifier
knc = KNeighborsClassifier()
knc.fit(X_train, Y_train)
knc_y_predict = knc.predict(X_test)
from sklearn.metrics import classification_report
print "LR 精确度:" + str(knc.score(X_test, Y_test))
print classification_report(Y_test, knc_y_predict, target_names=[ 'fly','wo','jingui','zhang','zhizhu'])
# 保存训练结果,供后面直接使用
from sklearn.externals import joblib
joblib.dump(knc,'model/knc.model')
| [
"[email protected]"
] | |
cd0dd0ac210bbca6c8922fd1b4b55b90ea0ad896 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /gfsa/model/end_to_end_stack.py | 0dcad41dab30b003212c181acf40bf2de50d6b57 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 16,457 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model components for an end-to-end-trainable graph/automaton hybrid.
The components defined in this module share a common interface
(input graph, node embeddings, edge embeddings)
-> (node embeddings, edge embeddings)
which allows them to be composed with each other. Note that most components
either modify node embeddings or edge embeddings but not both. Also note that
the output edge embeddings are allowed to be of a different size; in particular,
the components that add new edge types use SharedGraphContext.edges_are_embedded
to determine how to modify the edge embeddings.
"""
from typing import Dict, List, Optional, Tuple
import dataclasses
import flax
import gin
import jax
import jax.numpy as jnp
from gfsa import automaton_builder
from gfsa import jax_util
from gfsa.datasets import graph_bundle
from gfsa.model import automaton_layer
from gfsa.model import edge_supervision_models
from gfsa.model import graph_layers
from gfsa.model import model_util
from gfsa.model import side_outputs
# TODO(ddjohnson) Move common layers out of `edge_supervision_models`.

# Flax adds name keyword arguments.
# pylint: disable=unexpected-keyword-arg

# Type alias for the (node embeddings, edge embeddings) pair that every
# component in this module consumes and produces.
NodeAndEdgeEmbeddings = Tuple[jax_util.NDArray, jax_util.NDArray]
@dataclasses.dataclass
class SharedGraphContext:
  """Shared information about the input graph.

  Bundles everything about one input graph that the components in this
  module need, so it can be threaded through them unchanged.

  Attributes:
    bundle: The input graph.
    static_metadata: Padded size of the graph.
    edge_types_to_indices: Mapping from string edge names to edge type indices.
    builder: Automaton builder associated with this graph.
    edges_are_embedded: Whether the "edge_embeddings" represent edge types that
      are embedded into vectors (True), or just edge type adjacency matrices
      that are concatenated together.
  """
  bundle: graph_bundle.GraphBundle
  static_metadata: automaton_builder.EncodedGraphMetadata
  edge_types_to_indices: Dict[str, int]
  builder: automaton_builder.AutomatonBuilder
  edges_are_embedded: bool
def _add_edges(old_edge_array,
new_edge_types,
edges_are_embedded,
add_reverse = True):
"""Helper function to add edges of a new edge type.
If edges_are_embedded=True, we assume `old_edge_dim` is an embedding matrix;
the new edges are embedded and then added into this matrix. Otherwise, we
assume `old_edge_dim` is a stacked set of adjacency matrices, and concatenate
the new types.
Args:
old_edge_array: <float32[num_nodes, num_nodes, old_edge_dim]>
new_edge_types: <float32[num_nodes, num_nodes, new_edge_types]>, which
should be between 0 and 1, one for each added edge type.
edges_are_embedded: Whether edge types are embedded.
add_reverse: Whether to add reverse edges as well with a different type.
Returns:
<float32[num_nodes, num_nodes, output_edge_dim]>, where
output_edge_dim = old_edge_dim if edges_are_embedded=True, and otherwise
output_edge_dim = old_edge_dim + new_edge_types
"""
if add_reverse:
new_edge_types = jnp.concatenate(
[new_edge_types, new_edge_types.transpose((1, 0, 2))], -1)
if edges_are_embedded:
# Project the outputs into new edge embeddings.
# (No bias is used so that an absorbing probability of 0 produces no change
# in the edge embeddings.)
new_edge_type_embeddings = flax.deprecated.nn.Dense(
new_edge_types,
features=old_edge_array.shape[-1],
bias=False,
name="new_edge_type_embeddings")
output_edge_array = old_edge_array + new_edge_type_embeddings
else:
# Concatenate new embedding.
output_edge_array = jnp.concatenate([old_edge_array, new_edge_types],
axis=-1)
return output_edge_array
def _shared_automaton_logic(
    graph_context, node_embeddings,
    edge_embeddings,
    variant_weights):
  """Runs the graph automaton and appends its output as new edge type(s).

  Args:
    graph_context: Shared information about the input graph.
    node_embeddings: <float32[num_nodes, node_embedding_dim]> embeddings.
    edge_embeddings: <float32[num_nodes, num_nodes, edge_embedding_dim]>.
    variant_weights: Optional variant weights for the automaton, or None to
      run without variants.

  Returns:
    Tuple (node_embeddings, new_edge_embeddings); node embeddings pass
    through unchanged.
  """
  # Automaton output with the FSM-output axis leading: [output, source, target].
  automaton_output = automaton_layer.FiniteStateGraphAutomaton(
      encoded_graph=graph_context.bundle.automaton_graph,
      variant_weights=variant_weights,
      dynamic_metadata=graph_context.bundle.graph_metadata,
      static_metadata=graph_context.static_metadata,
      builder=graph_context.builder)
  # Rearrange to [source, target, output] before merging into the edges.
  new_edges = automaton_output.transpose([1, 2, 0])
  return node_embeddings, _add_edges(edge_embeddings, new_edges,
                                     graph_context.edges_are_embedded)
@flax.deprecated.nn.module
@gin.configurable
def variantless_automaton(
    graph_context, node_embeddings,
    edge_embeddings):
  """Runs a variant-free automaton and adds its output as a new edge type.

  Args:
    graph_context: Input graph for this example.
    node_embeddings: Current node embeddings, as <float32[num_nodes,
      node_embedding_dim]>
    edge_embeddings: Current edge embeddings, as <float32[num_nodes, num_nodes,
      edge_embedding_dim]>

  Returns:
    New node and edge embeddings. Node embeddings are unchanged; edge
    embeddings gain a new edge type (embedded or concatenated, depending on
    graph_context.edges_are_embedded).
  """
  return _shared_automaton_logic(
      graph_context=graph_context,
      node_embeddings=node_embeddings,
      edge_embeddings=edge_embeddings,
      variant_weights=None)
@flax.deprecated.nn.module
@gin.configurable
def edge_variant_automaton(
    graph_context,
    node_embeddings,
    edge_embeddings,
    variant_edge_types = gin.REQUIRED):
  """Runs an automaton whose variants come from edges of the input graph.

  Args:
    graph_context: Input graph for this example.
    node_embeddings: Current node embeddings, as <float32[num_nodes,
      node_embedding_dim]>
    edge_embeddings: Current edge embeddings, as <float32[num_nodes, num_nodes,
      edge_embedding_dim]>
    variant_edge_types: List of edge type names used as variants.

  Returns:
    New node and edge embeddings. Node embeddings are unchanged; edge
    embeddings gain a new edge type (embedded or concatenated, depending on
    graph_context.edges_are_embedded).
  """
  # Translate the configured edge type names into indices, then build the
  # per-edge variant weights from the input graph's edges.
  variant_type_indices = [
      graph_context.edge_types_to_indices[name]
      for name in variant_edge_types
  ]
  variant_weights = edge_supervision_models.variants_from_edges(
      graph_context.bundle, graph_context.static_metadata,
      variant_type_indices, len(graph_context.edge_types_to_indices))
  return _shared_automaton_logic(graph_context, node_embeddings,
                                 edge_embeddings, variant_weights)
@flax.deprecated.nn.module
@gin.configurable
def embedding_variant_automaton(
    graph_context,
    node_embeddings,
    edge_embeddings,
    num_variants = gin.REQUIRED):
  """Runs an automaton with variants based on node embeddings.

  Args:
    graph_context: Input graph for this example.
    node_embeddings: Current node embeddings, as <float32[num_nodes,
      node_embedding_dim]>
    edge_embeddings: Current edge embeddings, as <float32[num_nodes, num_nodes,
      edge_embedding_dim]>
    num_variants: How many variants to use. Must be at least 2; with a single
      variant this component is equivalent to `variantless_automaton`.

  Returns:
    New node and edge embeddings. Node embeddings will not be modified. Edge
    embeddings will be modified by adding a new edge type (either embedded or
    concatenated based on graph_context.edges_are_embedded).

  Raises:
    ValueError: If `num_variants` is less than 2.
  """
  if num_variants <= 1:
    # Bug fix: the old message claimed "at least one variant", but the check
    # above requires more than one (one variant carries no information).
    raise ValueError(
        "Must have more than one variant to use embedding_variant_automaton.")
  # Generate variants using a pairwise readout of the node embeddings.
  variant_logits = graph_layers.BilinearPairwiseReadout(
      node_embeddings, num_variants, name="variant_logits")
  variant_logits = side_outputs.encourage_discrete_logits(
      variant_logits, distribution_type="categorical", name="variant_logits")
  variant_weights = jax.nn.softmax(variant_logits)
  return _shared_automaton_logic(graph_context, node_embeddings,
                                 edge_embeddings, variant_weights)
@flax.deprecated.nn.module
@gin.configurable
def nri_encoder_readout(
    graph_context,
    node_embeddings,
    edge_embeddings,
    num_edge_types = gin.REQUIRED):
  """Predicts new edge types from node pairs with an NRI-style encoder.

  A sigmoid (rather than a softmax) is applied to the pairwise logits, since
  a node pair is not required to have exactly one of the predicted edge types.

  Args:
    graph_context: Input graph for this example.
    node_embeddings: Current node embeddings, as <float32[num_nodes,
      node_embedding_dim]>
    edge_embeddings: Current edge embeddings, as <float32[num_nodes, num_nodes,
      edge_embedding_dim]>
    num_edge_types: How many edge types to produce.

  Returns:
    New node and edge embeddings. Node embeddings are unchanged; edge
    embeddings gain the predicted edge types (embedded or concatenated,
    depending on graph_context.edges_are_embedded).
  """
  pairwise_logits = graph_layers.NRIReadout(
      node_embeddings=node_embeddings, readout_dim=num_edge_types)
  edge_probs = jax.nn.sigmoid(pairwise_logits)
  # Rows beyond the true node count are padding; zero out their predictions.
  real_node = (
      jnp.arange(edge_probs.shape[0]) <
      graph_context.bundle.graph_metadata.num_nodes)
  edge_probs = jnp.where(real_node[:, None, None], edge_probs,
                         jnp.zeros_like(edge_probs))
  return (node_embeddings,
          _add_edges(edge_embeddings, edge_probs,
                     graph_context.edges_are_embedded))
class UniformRandomWalk(flax.deprecated.nn.Module):
  """Adds edges according to a uniform random walk along the graph."""

  @gin.configurable("UniformRandomWalk")
  def apply(
      self,
      graph_context,
      node_embeddings,
      edge_embeddings,
      forward_edge_types = gin.REQUIRED,
      reverse_edge_types = gin.REQUIRED,
      walk_length_log2 = gin.REQUIRED,
  ):
    """Modifies edge embeddings using a uniform random walk.

    Uses an efficient repeated-squaring technique to compute the absorbing
    distribution.

    Args:
      graph_context: Input graph for this example.
      node_embeddings: Current node embeddings, as <float32[num_nodes,
        node_embedding_dim]>
      edge_embeddings: Current edge embeddings, as <float32[num_nodes,
        num_nodes, edge_embedding_dim]>
      forward_edge_types: Edge types to use in the forward direction. As a list
        of lists to allow configuring groups of edges in config files; this will
        be flattened before use.
      reverse_edge_types: Edge types to use in the reverse direction. Note that
        reversed edge types are given a separate embedding from forward edge
        types; undirected edges should be represented by adding two edges in
        opposite directions and then only using `forward_edge_types`. Also a
        list of lists, as above.
      walk_length_log2: Base-2 logarithm of maximum walk length; this determines
        how many times we will square the transition matrix (doubling the walk
        length).

    Returns:
      New node and edge embeddings. Node embeddings will not be modified. Edge
      embeddings will be modified by adding a new edge type (either embedded or
      concatenated based on graph_context.edges_are_embedded).
    """
    num_nodes = node_embeddings.shape[0]
    # Flatten the configured groups of edge type names into flat index lists.
    # pylint: disable=g-complex-comprehension
    forward_edge_type_indices = [
        graph_context.edge_types_to_indices[type_str]
        for group in forward_edge_types
        for type_str in group
    ]
    reverse_edge_type_indices = [
        graph_context.edge_types_to_indices[type_str]
        for group in reverse_edge_types
        for type_str in group
    ]
    # pylint: enable=g-complex-comprehension
    adjacency = graph_layers.edge_mask(
        edges=graph_context.bundle.edges,
        num_nodes=num_nodes,
        num_edge_types=len(graph_context.edge_types_to_indices),
        forward_edge_type_indices=forward_edge_type_indices,
        reverse_edge_type_indices=reverse_edge_type_indices)
    # Add self-loops so every node has at least one outgoing transition.
    adjacency = jnp.maximum(adjacency, jnp.eye(num_nodes))
    # Learned probability of stopping (being absorbed) at each step,
    # initialized to 0.1.
    absorbing_logit = self.param(
        "absorbing_logit",
        shape=(),
        initializer=lambda *_: jax.scipy.special.logit(0.1))
    absorbing_prob = jax.nn.sigmoid(absorbing_logit)
    nonabsorbing_prob = jax.nn.sigmoid(-absorbing_logit)
    # Row-normalized uniform transition matrix, scaled by the probability of
    # continuing the walk.
    walk_matrix = nonabsorbing_prob * adjacency / jnp.sum(
        adjacency, axis=1, keepdims=True)
    # Repeated squaring: each scan step doubles the walk length, carrying the
    # current power of the transition matrix alongside the partial geometric
    # series, per the schedule below (A = walk_matrix):
    # A, I
    # A^2, A + I
    # (A^2)^2 = A^4, (A + I)A^2 + (A + I) = A^3 + A^2 + A + I
    # ...
    def step(state, _):
      nth_power, nth_partial_sum = state
      return (nth_power @ nth_power,
              nth_power @ nth_partial_sum + nth_partial_sum), None
    (_, partial_sum), _ = jax.lax.scan(
        step, (walk_matrix, jnp.eye(num_nodes)), None, length=walk_length_log2)
    # partial_sum = I + A + ... + A^(2^walk_length_log2 - 1); scaling by the
    # absorbing probability approximates the absorbing distribution.
    approx_visits = absorbing_prob * partial_sum
    # Map through logit space with a learned affine rescaling, then squash
    # back to [0, 1] to obtain the new edge weights.
    logits = model_util.safe_logit(approx_visits)
    logits = model_util.ScaleAndShift(logits)
    edge_weights = jax.nn.sigmoid(logits)
    return (node_embeddings,
            _add_edges(edge_embeddings, edge_weights[:, :, None],
                       graph_context.edges_are_embedded))
@flax.deprecated.nn.module
def ggnn_adapter(graph_context,
                 node_embeddings,
                 edge_embeddings):
  """Adapter that applies GGNN message-passing steps to the nodes.

  Args:
    graph_context: Input graph for this example (unused).
    node_embeddings: Current node embeddings, as <float32[num_nodes,
      node_embedding_dim]>
    edge_embeddings: Current edge embeddings, as <float32[num_nodes, num_nodes,
      edge_embedding_dim]>

  Returns:
    Updated node embeddings from the GGNN, with the edge embeddings passed
    through untouched.
  """
  del graph_context  # Present only to satisfy the shared component interface.
  updated_nodes = edge_supervision_models.ggnn_steps(node_embeddings,
                                                     edge_embeddings)
  return updated_nodes, edge_embeddings
@flax.deprecated.nn.module
def transformer_adapter(
    graph_context, node_embeddings,
    edge_embeddings):
  """Adapter that applies unmasked transformer blocks to the nodes.

  Args:
    graph_context: Input graph for this example.
    node_embeddings: Current node embeddings, as <float32[num_nodes,
      node_embedding_dim]>
    edge_embeddings: Current edge embeddings, as <float32[num_nodes, num_nodes,
      edge_embedding_dim]>

  Returns:
    Node embeddings processed by the transformer, with the edge embeddings
    passed through untouched.
  """
  real_node_count = graph_context.bundle.graph_metadata.num_nodes
  updated_nodes = edge_supervision_models.transformer_steps(
      node_embeddings,
      edge_embeddings,
      neighbor_mask=None,
      num_real_nodes_per_graph=real_node_count,
      mask_to_neighbors=False)
  return updated_nodes, edge_embeddings
@flax.deprecated.nn.module
def nri_adapter(graph_context,
                node_embeddings,
                edge_embeddings):
  """Adapter that applies NRI-style message passing to the nodes.

  Args:
    graph_context: Input graph for this example.
    node_embeddings: Current node embeddings, as <float32[num_nodes,
      node_embedding_dim]>
    edge_embeddings: Current edge embeddings, as <float32[num_nodes, num_nodes,
      edge_embedding_dim]>

  Returns:
    Node embeddings processed by the NRI-style model, with the edge
    embeddings passed through untouched.
  """
  real_node_count = graph_context.bundle.graph_metadata.num_nodes
  updated_nodes = edge_supervision_models.nri_steps(
      node_embeddings,
      edge_embeddings,
      num_real_nodes_per_graph=real_node_count)
  return updated_nodes, edge_embeddings
# Registry of all hybrid model components, keyed by the names used in gin
# configuration files. Every entry follows the shared interface
# (graph_context, node_embeddings, edge_embeddings) -> (nodes, edges).
ALL_COMPONENTS = {
    "variantless_automaton": variantless_automaton,
    "edge_variant_automaton": edge_variant_automaton,
    "embedding_variant_automaton": embedding_variant_automaton,
    "nri_encoder_readout": nri_encoder_readout,
    "ggnn_adapter": ggnn_adapter,
    "transformer_adapter": transformer_adapter,
    "nri_adapter": nri_adapter,
    "UniformRandomWalk": UniformRandomWalk,
}
| [
"[email protected]"
] | |
cea43589a7bb31e1bf0c658d9ea1813573b2e2bc | ab67bf011764b6c0b6803cd44985a5a2ad3f2593 | /udpsocket.py | 2b222871dc47eb1b8e436bd7d76fd4d52cdb877e | [] | no_license | pr0logas/streamSockets | cba0616ead596bf331eda4f54b6112a212e462fc | 3f759509dfcb556d3b6a25f11c9f512fb7be430b | refs/heads/master | 2022-11-25T06:09:17.503818 | 2020-07-27T13:53:15 | 2020-07-27T13:53:15 | 285,097,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,970 | py | import socket
import os, sys, time
from time import sleep
MCAST_GRP = '10.10.10.10'
MCAST_PORT = 9004
MULTICAST_TTL = 10
bytes_size_to_process = 1024
time_between_data_seconds = 5
time_between_packets_float = 0.0055
def startSocket():
    """Create and return the UDP socket used for multicast streaming.

    Bug fix: the original stored the socket in a local variable and returned
    nothing (and was never called), so the module-level `s` used by
    sendDataOverSocket never existed.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, MULTICAST_TTL)
    return sock


def sendDataOverSocket(data, sleeptime):
    """Send one chunk to the multicast group, then sleep `sleeptime` seconds.

    Empty/falsy chunks (e.g. the final b'' read at EOF) are skipped.
    """
    if data:
        s.sendto(data, (MCAST_GRP, MCAST_PORT))
        sleep(sleeptime)


def adjustTimeForNewData(start, end, sleeptime):
    """Report how far ahead of / behind schedule the last file pass ran."""
    result = (time_between_data_seconds - (end - start))
    if result < 0:
        print("No sleep needed we are {} seconds late to stream the data!".format(result) + " Next sleep: " + str(sleeptime))
    else:
        print("Sleeping for {} Waiting for next data...".format(result) + " Next sleep: " + str(sleeptime))


# Create the socket once at module level so sendDataOverSocket can use it.
s = startSocket()

while True:
    starttime = time.time()
    with open("channels/currentFile.ts", "rb", buffering=1) as f:
        expectedPackets = os.stat('channels/currentFile.ts').st_size / bytes_size_to_process
        print(expectedPackets)
        if expectedPackets <= 0:
            # Empty file: nothing to stream yet; retry shortly (also avoids
            # the ZeroDivisionError the original would hit below).
            sleep(time_between_packets_float)
            continue
        # Spread the whole file evenly over the data window, minus a small
        # per-packet overhead fudge factor.
        sleepTime = (time_between_data_seconds / expectedPackets) - 0.000120256
        chunk = f.read(bytes_size_to_process)
        sendDataOverSocket(chunk, sleepTime)
        while chunk:
            chunk = f.read(bytes_size_to_process)
            sendDataOverSocket(chunk, sleepTime)
    endtime = time.time()
    adjustTimeForNewData(starttime, endtime, sleepTime)
"[email protected]"
] | |
4ca7dd8882f263f5749f1eecebddf59f13b12871 | 0969f7c85e5ae0a19982077d6bb702c41b2b1e1f | /nets/mobilenet/mobilenet_v2.py | 02f5fa0510270fecc4ea3bd20c7f4da25bad20b1 | [
"MIT"
] | permissive | 353622088/tianchi | 544e49bb6720c4978188cdbddd88a0ebe9f5669c | e1f378e5fd783eb4cfbfaf8ecdd944b8fcfdd733 | refs/heads/master | 2020-04-19T09:06:35.946147 | 2019-01-30T09:30:05 | 2019-01-30T09:30:05 | 168,099,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,434 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Mobilenet V2.
Architecture: https://arxiv.org/abs/1801.04381
The base model gives 72.2% accuracy on ImageNet, with 300MMadds,
3.4 M parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet as lib
import functools
slim = tf.contrib.slim
op = lib.op
expand_input = ops.expand_input_by_factor
# pyformat: disable
# Architecture: https://arxiv.org/abs/1801.04381
# MobilenetV2 architecture definition: shared layer defaults plus the layer
# sequence (stem conv, inverted-residual "expanded" blocks, final 1x1 conv).
V2_DEF = dict(
    defaults={
        # Note: these parameters of batch norm affect the architecture
        # that's why they are here and not in training_scope.
        (slim.batch_norm,): {'center': True, 'scale': True},
        (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
            'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
        },
        # Expanded (inverted-residual) blocks default to a 6x input expansion
        # with a residual connection (https://arxiv.org/abs/1801.04381).
        (ops.expanded_conv,): {
            'expansion_size': expand_input(6),
            'split_expansion': 1,
            'normalizer_fn': slim.batch_norm,
            'residual': True
        },
        (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
    },
    spec=[
        op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
        # The first block overrides the default and uses expansion factor 1.
        op(ops.expanded_conv,
           expansion_size=expand_input(1, divisible_by=1),
           num_outputs=16),
        op(ops.expanded_conv, stride=2, num_outputs=24),
        op(ops.expanded_conv, stride=1, num_outputs=24),
        op(ops.expanded_conv, stride=2, num_outputs=32),
        op(ops.expanded_conv, stride=1, num_outputs=32),
        op(ops.expanded_conv, stride=1, num_outputs=32),
        op(ops.expanded_conv, stride=2, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=96),
        op(ops.expanded_conv, stride=1, num_outputs=96),
        op(ops.expanded_conv, stride=1, num_outputs=96),
        op(ops.expanded_conv, stride=2, num_outputs=160),
        op(ops.expanded_conv, stride=1, num_outputs=160),
        op(ops.expanded_conv, stride=1, num_outputs=160),
        op(ops.expanded_conv, stride=1, num_outputs=320),
        # Final pointwise conv producing the 1280-channel feature map.
        op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280)
    ],
)
# pyformat: enable
@slim.add_arg_scope
def mobilenet(input_tensor,
              num_classes=1001,
              depth_multiplier=1.0,
              scope='MobilenetV2',
              conv_defs=None,
              finegrain_classification_mode=False,
              min_depth=None,
              divisible_by=None,
              **kwargs):
  """Creates mobilenet V2 network.

  Inference mode is created by default. To create training use training_scope
  below.

  with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
    logits, endpoints = mobilenet_v2.mobilenet(input_tensor)

  Args:
    input_tensor: The input tensor
    num_classes: number of classes
    depth_multiplier: The multiplier applied to scale number of
      channels in each layer. Note: this is called depth multiplier in the
      paper but the name is kept for consistency with slim's model builder.
    scope: Scope of the operator
    conv_defs: Allows to override default conv def.
    finegrain_classification_mode: When set to True, the model
      will keep the last layer large even for small multipliers. Following
      https://arxiv.org/abs/1801.04381
      suggests that it improves performance for ImageNet-type of problems.
      *Note* ignored if final_endpoint makes the builder exit earlier.
    min_depth: If provided, will ensure that all layers will have that
      many channels after application of depth multiplier.
    divisible_by: If provided will ensure that all layers # channels
      will be divisible by this number.
    **kwargs: passed directly to mobilenet.mobilenet:
      prediction_fn- what prediction function to use.
      reuse-: whether to reuse variables (if reuse set to true, scope
      must be given).

  Returns:
    logits/endpoints pair

  Raises:
    ValueError: On invalid arguments
  """
  if conv_defs is None:
    conv_defs = V2_DEF
  if 'multiplier' in kwargs:
    raise ValueError('mobilenetv2 doesn\'t support generic '
                     'multiplier parameter use "depth_multiplier" instead.')
  if finegrain_classification_mode:
    # Deep-copy so the shared V2_DEF constant is never mutated in place.
    conv_defs = copy.deepcopy(conv_defs)
    if depth_multiplier < 1:
      # Pre-scale the final conv up so that, after the depth multiplier is
      # applied, it keeps (roughly) its original size.
      # NOTE(review): true division leaves num_outputs as a float here;
      # presumably the builder's depth-multiplier rounding restores an
      # integer channel count — confirm.
      conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
  depth_args = {}
  # NB: do not set depth_args unless they are provided to avoid overriding
  # whatever default depth_multiplier might have thanks to arg_scope.
  if min_depth is not None:
    depth_args['min_depth'] = min_depth
  if divisible_by is not None:
    depth_args['divisible_by'] = divisible_by
  with slim.arg_scope((lib.depth_multiplier,), **depth_args):
    return lib.mobilenet(
        input_tensor,
        num_classes=num_classes,
        conv_defs=conv_defs,
        scope=scope,
        multiplier=depth_multiplier,
        **kwargs)
def wrapped_partial(func, *args, **kwargs):
  """Returns functools.partial(func, ...) carrying func's metadata.

  Unlike a plain functools.partial, the returned callable exposes the wrapped
  function's attributes (__name__, __doc__, etc.), which keeps generated
  aliases such as mobilenet_v2_100 self-documenting.
  """
  return functools.update_wrapper(
      functools.partial(func, *args, **kwargs), func)
# Convenience constructors at fixed depth multipliers; wrapped_partial keeps
# the original function's name/docstring on the aliases.
mobilenet_v2_100 = wrapped_partial(mobilenet, depth_multiplier=1.00)
mobilenet_v2_140 = wrapped_partial(mobilenet, depth_multiplier=1.40)
@slim.add_arg_scope
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
  """Builds the MobilenetV2 feature extractor only (no pooling, no logits)."""
  return mobilenet(
      input_tensor,
      depth_multiplier=depth_multiplier,
      base_only=True,
      **kwargs)
def training_scope(**kwargs):
  """Defines MobilenetV2 training scope.

  Usage:
    with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
      logits, endpoints = mobilenet_v2.mobilenet(input_tensor)

  Args:
    **kwargs: Forwarded to mobilenet.training_scope. Supported keys:
      weight_decay - The weight decay to use for regularizing the model.
      stddev - Standard deviation for initialization; negative uses Xavier.
      dropout_keep_prob - dropout keep probability.
      bn_decay - decay for the batch norm moving averages.

  Returns:
    An `arg_scope` to use for the mobilenet v2 model.
  """
  return lib.training_scope(**kwargs)
__all__ = ['training_scope', 'mobilenet_base', 'mobilenet', 'V2_DEF']
| [
"[email protected]"
] | |
e74010a40ad06fe82916fea9a7e6c222b087a685 | cb83b02ead1cb77c87e117118f7e5cd3ecf46ba1 | /sistema_plantilla/settings/settings.py | 6652e9955e4d55b35976610917e962c7d8b0c985 | [] | no_license | danielhuamani/sistema-plantilla-saas | f834d90157b3d0ab1724fe7d3be5e9224cf753ae | 8802a4b429fdce9ce433539684b52e2177042c35 | refs/heads/master | 2020-12-11T01:48:45.313743 | 2016-01-18T16:10:24 | 2016-01-18T16:10:24 | 48,857,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,996 | py | """
Django settings for settings project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from os.path import dirname, join, realpath
BASE_DIR = dirname(dirname(realpath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'vhgbr0j26ii9t4juw%_z)_^wm8st_#1$8zrj4yq7!5b)7-@554'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is hard-coded to True; it must be False in production.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.productos',
'apps.clientes',
'apps.configuracion',
'apps.theme',
'apps.theme_admin',
'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'settings.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (join(BASE_DIR, 'templates'),),
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.static',
],
},
},
]
WSGI_APPLICATION = 'settings.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
| [
"[email protected]"
] | |
80793db4fcb6d003bcd7f9d86fe4acae5bc1a6c0 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/6ae12eacdae24553a91c0270cb101e66.py | 5d6e3848a1fa367f700bac002a2b381e701f99cc | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 281 | py | #
# Skeleton file for the Python "Bob" exercise.
#
def hey(what):
    """Respond to a remark the way Bob would.

    Rules, in priority order:
      * silence (None, empty, or whitespace-only) -> "Fine. Be that way!"
      * shouting (all upper-case letters)         -> "Whoa, chill out!"
      * a question (ends with '?')                -> "Sure."
      * anything else                             -> "Whatever."

    Bug fix: the previous version answered "Sure." to any remark that merely
    ended with a space, and missed questions with trailing whitespace; we now
    strip the remark and test only for a trailing '?'.
    """
    if what is None or what.strip() == "":
        return "Fine. Be that way!"
    remark = what.strip()
    if remark.isupper():
        return "Whoa, chill out!"
    if remark.endswith("?"):
        return "Sure."
    return "Whatever."
| [
"[email protected]"
] | |
89e0353d4de23f2ac613d436afbbec0a40354e19 | e8ef02248600525a114c9ed9a6098e95d8007063 | /qtlab/scripts/sal/ff_powersweep.py | 7966c043a04204757185031af05d8a6ff6e2df04 | [] | no_license | mgely/papyllon | ac264e202298728f6ca69d566c1fe45a9de0dc1c | 490c756da8f08c971864dcd983ea82c944bc8c85 | refs/heads/master | 2021-01-10T06:28:17.250944 | 2016-02-26T13:49:21 | 2016-02-26T13:49:21 | 46,259,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py | #prepare environment
import qt
import visa
import numpy as np
from numpy import pi, random, arange, size, array, sin, cos, diff, absolute,zeros, sign,ceil,sqrt,absolute
from time import time, sleep, localtime, strftime
execfile('metagen.py')
#Check and load instrument plugins
instlist = qt.instruments.get_instrument_names()
print "installed instruments: "+" ".join(instlist)
#install the drivers no check
if 'med' not in instlist:
med = qt.instruments.create('med','med')
#if 'adwin' not in instlist:
# adwin= qt.instruments.create('adwin', 'ADwin_DAC',address=0x255)
if 'ff' not in instlist:
ff=visa.instrument('TCPIP0::192.168.1.151::inst0::INSTR')
instlist = qt.instruments.get_instrument_names()
print "Available instruments: "+" ".join(instlist)
#measurement information stored in manual in MED instrument
#med.set_device('ShunDevice')
#med.set_setup('BF_4, conversion is 1uA/V')
#med.set_user('Shun')
qt.mstart()
spyview_process(reset=True) #clear old meta-settings
filename = 'EOS8_C_FF'
data = qt.Data(name=filename)
data.add_coordinate('Probe Frequency [MHz]')
data.add_coordinate('Voltage [uA]')
data.add_value('S21 [abs]')
data.add_value('S21 [rad]')
#data.create_file()
data.create_file(name=filename, datadirs='D:\\data\\Sal\\EOS8_C\\temp_powersweep')
data.copy_file('FF_powersweep.py')
kHz = 1e3
MHz = 1e6
GHz = 1e9
####Settings:
#Current temperature
# 18mK
## 10dB on VNA out
## miteq on the input port 2
## I to V conversion 100uA/1Volt
##
######### Variables for NA
pinit=-45
bw=30
f_start=5.272*GHz
f_stop=5.372*GHz
f_pts=401
##hanger_f0=5900.59*MHz
##hanger_span=1000*kHz
##f1_start=hanger_f0-hanger_span/2
##f1_stop=hanger_f0+hanger_span/2
### Variables for field
#v_start=0
#v_stop=1.5
#v_pts=1501
### Variables for power
p_start = -45
p_stop =0
p_pts =10
### Preparing NA
ff.write('INST:SEL "NA";*OPC?')
ff.write('FREQ:STOP '+str(f_stop)+'\n')
ff.write('FREQ:STAR '+str (f_start)+'\n')
ff.write('BWID '+str(bw)+'\n')
ff.write('SOUR:POW '+str(pinit)+'\n')
ff.write('SWE:POIN '+str(f_pts)+'\n')
ff.write('CALC:PAR:DEF S21 \r')
### Prepare ADWIN for current sweep
#adwin.start_process()
########### making lists of values to be measured ###########
f_list=np.linspace(f_start,f_stop,f_pts)
#v_list=np.linspace(v_start,v_stop,v_pts)
p_list = np.linspace(p_start,p_stop,p_pts)
##################################################
qt.msleep(0.1)
# Sweep the VNA source power, capturing one S21 trace (log-magnitude and
# phase) per power point.
for p in p_list:
    print 'current power '+str(p)+' power'
    ff.write('SOUR:POW ' +str(p)+'\n')
    print ff.ask('SOUR:POW?')
    #adwin.set_DAC_2(v)
    qt.msleep(2)
    #setting tarce 1
    # Trigger a sweep, then wait for the instrument to finish acquiring.
    ff.write('INIT \r')
    qt.msleep(15)
    # Read the trace back as log-magnitude...
    ff.write('CALC:FORM MLOG \r')
    qt.msleep(2)
    # NOTE(review): eval() on the raw instrument reply is fragile and unsafe;
    # consider parsing the comma-separated floats explicitly instead.
    trace_mlog = eval(ff.ask('CALC:DATA:FDAT? \r'))
    qt.msleep(2)
    # ...and again as phase (degrees).
    ff.write('CALC:FORM PHAS \r')
    qt.msleep(2)
    trace_phase = eval(ff.ask('CALC:DATA:FDAT? \r'))
    # Save one data block per power point; the phase is unwrapped and
    # differentiated before saving (presumably a group-delay-like signal —
    # confirm intended units).
    v_dummy=np.linspace(p,p,len(f_list))
    data.add_data_point(v_dummy,f_list,trace_mlog, np.gradient(np.unwrap(np.deg2rad(trace_phase),np.pi)))
    data.new_block()
    spyview_process(data,f_start,f_stop,p)
    qt.msleep(0.01)
data.close_file()
qt.mend()
| [
"[email protected]"
] | |
eddb1083f72d566a9ba78588b02c0de1582230e7 | 8cb101991346bd6403cfaca88b0445f917e52254 | /tuneuptechnology/tickets.py | d5ccf178b5ba56d3c933e59ce7abdad16b3a0163 | [
"MIT",
"Python-2.0"
] | permissive | TrendingTechnology/tuneuptechnology-python | a06742fbf404fb1afc525ccf1d432c4c374866f1 | 479bbece1722f7e233dbc0f7642205e1afa971c1 | refs/heads/main | 2023-06-08T17:26:41.108769 | 2021-06-22T02:15:45 | 2021-06-22T02:15:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | class Tickets():
    def __init__(self, base_url, make_http_request):
        """Store the API root URL and the callable used to issue HTTP requests."""
        self.base_url = base_url
        self.make_http_request = make_http_request
def create(self, data):
"""Create a ticket based on the data passed"""
endpoint = f'{self.base_url}/tickets'
response = self.make_http_request('post', endpoint, data)
return response
def all(self):
"""Retrieve all tickets"""
endpoint = f'{self.base_url}/tickets'
response = self.make_http_request('get', endpoint)
return response
def retrieve(self, id):
"""Retrieve a single ticket"""
endpoint = f'{self.base_url}/tickets/{id}'
response = self.make_http_request('get', endpoint)
return response
def update(self, id, data):
"""Update a ticket with the passed params"""
endpoint = f'{self.base_url}/tickets/{id}'
response = self.make_http_request('patch', endpoint, data)
return response
def delete(self, id):
"""Delete a ticket with the ID passed"""
endpoint = f'{self.base_url}/tickets/{id}'
response = self.make_http_request('delete', endpoint)
return response
| [
"[email protected]"
] | |
3ba4d9a3323a8bb7a9dd944f28bff4943cd98968 | 266947fd84eed629ed0c21f6d91134239512afd9 | /BeginnerContest_B/061.py | 8605568528a2b38ba911c5cdf7aae2aba95aad32 | [] | no_license | SkiMsyk/AtCoder | c86adeec4fa470ec14c1be7400c9fc8b3fb301cd | 8102b99cf0fb6d7fa304edb942d21cf7016cba7d | refs/heads/master | 2022-09-03T01:23:10.748038 | 2022-08-15T01:19:55 | 2022-08-15T01:19:55 | 239,656,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | N, M = map(int, input().split())
# res[v] accumulates the degree of vertex v of an undirected graph with N
# vertices and M edges (input vertices are 1-indexed; the list is 0-indexed).
res = [0]*N
for _ in range(M):
    a, b = map(int, input().split())
    res[a-1] += 1
    res[b-1] += 1
# Output: the degree of each vertex, one per line.
for _ in range(N):
    print(res[_])
"[email protected]"
] | |
3740b278f395768c4a255c2166677022992d93a9 | 85574bab97569bae7368dc4e2d2aa73c73743a9b | /DSPFromGroundUp/Python/016RunningSumV2/main.py | 9bf1bb6d344340923a786a4e595a379f76fda9cf | [] | no_license | saradhimpardha/UdemyDSPFromGroundUpOnARMProcessors | 3c0fcd7272e892f222871dc412fc214851477aea | 576d4a38992533ed0733278d6b4b6444db58706b | refs/heads/main | 2023-05-04T15:45:30.184864 | 2021-05-28T14:40:46 | 2021-05-28T14:40:46 | 458,248,148 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | #
# Imports
#
from matplotlib import pyplot as plt
from matplotlib import style
import mysignals as sigs
#
# Global variables
#
#
# Private functions
#
#
def calc_running_sum(sig_src_arr, sig_dest_arr):
    """Compute the running (cumulative) sum of sig_src_arr into sig_dest_arr.

    After the call, sig_dest_arr[i] == sig_src_arr[0] + ... + sig_src_arr[i];
    any extra trailing slots in sig_dest_arr are left zeroed. sig_dest_arr
    must be at least as long as sig_src_arr.
    """
    for i in range(len(sig_dest_arr)):
        sig_dest_arr[i] = 0
    # Accumulate explicitly instead of reading sig_dest_arr[x - 1], which
    # relied on Python's negative indexing (dest[-1]) happening to be zero
    # on the first iteration.
    total = 0
    for i, sample in enumerate(sig_src_arr):
        total += sample
        sig_dest_arr[i] = total
#
# main
#
if __name__ == "__main__":
    # Compute the running sum of the demo input signal into a 320-slot
    # buffer, then plot input vs. output on a shared x axis.
    # NOTE(review): assumes the input signal fits in 320 samples — confirm.
    output_signal =[None]*320
    calc_running_sum(sigs.InputSignal_1kHz_15kHz,output_signal)
    #
    style.use('ggplot')
    #style.use('dark_background')
    f,plt_arr = plt.subplots(2,sharex=True)
    f.suptitle("Running Sum")
    plt_arr[0].plot(sigs.InputSignal_1kHz_15kHz,color='red')
    plt_arr[0].set_title("Input Signal")
    plt_arr[1].plot(output_signal,color ='magenta')
    plt_arr[1].set_title("Output Signal")
    plt.show()
| [
"[email protected]"
] | |
0751238896833b73c9818850c8150c8aff389c4b | f4b74154a7e50a9cfd325b45046b6c86c1682847 | /src/settings.py | ccb98801ea3144dc52ce825ead5c542150f3330b | [] | no_license | toxicOxygen/personal_website- | 826225a979ef0e62aaddf9730d1fd5d533400310 | 1826ef3de43fc4d162a509f48a1f90392ac136e5 | refs/heads/master | 2021-09-23T09:28:51.103637 | 2020-03-30T02:12:58 | 2020-03-30T02:12:58 | 251,178,977 | 0 | 0 | null | 2021-09-22T18:54:41 | 2020-03-30T02:13:38 | HTML | UTF-8 | Python | false | false | 3,619 | py | """
Django settings for src project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=$%2g7+8uw(qd3##ayde181009u=1$40xpz=aqg4#)5&80oji7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'js_projects',
'pages'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'src.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR,'static'),]
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'uksoiwcaeargewci'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# django_heroku.settings(locals()) | [
"[email protected]"
] | |
27cb43ce03426ae33a2a613a5b551d5332371f3c | 4a995ce459f42c372d548eb397e95a7793b8b965 | /cursoshandler/models.py | fe22f8853dc98344edddb248959e587b5692a3c5 | [] | no_license | astandre/chatbot-system | edb1d1835fe61a2140bad53e7f68ce2bc724018a | 99aab3e1e63a05bd475c5af8733b8c771d5e69f5 | refs/heads/master | 2022-12-12T01:37:13.498987 | 2018-10-13T23:03:03 | 2018-10-13T23:03:03 | 145,641,189 | 0 | 0 | null | 2022-12-08T02:47:10 | 2018-08-22T01:49:28 | Python | UTF-8 | Python | false | false | 2,784 | py | from neomodel import *
from neomodel import install_all_labels, remove_all_labels
# remove_all_labels()
# install_all_labels()
# clear_neo4j_database(db)
# TODO set label and help_text
class Curso(StructuredNode):
uid = UniqueIdProperty()
nombre = StringProperty(required=True, unique_index=True)
cod = StringProperty(unique=True, required=True)
descripcion = StringProperty(required=False)
pre_requisitos = StringProperty(required=False)
edicion = StringProperty(required=False)
oferta = StringProperty(required=False)
tematica = StringProperty(required=False)
fecha_inscripcion = DateProperty(default_now=True)
fecha_inicio = DateProperty(default_now=True)
esfuerzo_estimado = StringProperty(default=0)
duracion = StringProperty(required=False)
link = StringProperty(default="http://opencampus.utpl.edu.ec/")
INSTITUCIONES = {
"U": "UTPL",
"O": "Otro",
}
institucion = StringProperty(choices=INSTITUCIONES, default="U")
archivado = BooleanProperty(default=False)
docente = RelationshipTo('Docente', 'HAS_A_DOCENTE', cardinality=OneOrMore)
competencia = RelationshipTo('Competencia', 'HAS_A_COMPETENCIA', cardinality=OneOrMore)
reto = RelationshipTo('Reto', 'HAS_A_RETO', cardinality=OneOrMore)
contenido = RelationshipTo('Contenido', 'HAS_A_CONTENIDO', cardinality=OneOrMore)
sinonimo = RelationshipTo('Sinonimo', 'HAS_A_SINONIMO', cardinality=OneOrMore)
class Docente(StructuredNode):
uid = UniqueIdProperty()
nombre = StringProperty(unique_index=True, required=True)
N_ACADEMICO = {
"TN": "Nivel Técnico",
"CN": "Tercer Nivel",
"T": "Cuarto Nivel",
}
nivel_academico = StringProperty(default="T", choices=N_ACADEMICO)
email = EmailProperty(required=False)
resumen = StringProperty(required=False)
curso = RelationshipTo('Curso', 'TEACHES', cardinality=OneOrMore)
class Competencia(StructuredNode):
competencia = StringProperty(unique=True, required=True)
curso = RelationshipTo(Curso, 'IS_FROM', cardinality=OneOrMore)
class Reto(StructuredNode):
titulo_reto = StringProperty(unique=True, required=True)
fecha_inicio = DateTimeProperty(default_now=True)
fecha_fin = DateTimeProperty(default_now=True)
descripcion = StringProperty(required=False)
curso = RelationshipTo(Curso, 'IS_FROM', cardinality=OneOrMore)
class Contenido(StructuredNode):
orden = StringProperty(required=True)
contenido = StringProperty(unique=True, required=True)
curso = RelationshipTo(Curso, 'IS_FROM', cardinality=OneOrMore)
class Sinonimo(StructuredNode):
sinonimo = StringProperty(required=True, unique_index=True)
curso = RelationshipTo(Curso, 'IS_FROM', cardinality=OneOrMore)
| [
"[email protected]"
] | |
45da61cb3415eb8e07c8366c7b8f0ed58e3c101e | 982539edb302b6bee5dd9285e9de00ad866b4cfd | /Tongji/Mode/PlatUserConf.py | 0128446c9127bba03b5fb549b02ac0e89e624e1b | [] | no_license | chennqqi/OpenSaaSProj | 2149a2066c607636ce2106801be2cb722cc0934d | 0f861a61d1bd1499599207a70a8e180930d96573 | refs/heads/master | 2020-04-04T16:14:08.943396 | 2017-06-01T06:50:32 | 2017-06-01T06:50:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | # -*- coding: utf-8 -*-
from datetime import date
from pony.orm import *
def define_user_conf(db):
    """Declare the Plat_user_conf entity on the given pony.orm Database and return it.

    The class must be created inside this function because pony entities are
    bound to a specific Database instance via db.Entity.
    """
    class Plat_user_conf(db.Entity):
        # Auto-increment primary key, stored as MySQL int(20).
        id = PrimaryKey(int, sql_type="int(20)", auto=True)
        tm = Optional(date)        # record date
        ver = Optional(str)        # version string -- presumably app version; confirm upstream
        pub = Optional(str)        # publisher/channel id -- TODO confirm semantics
        nameid = Optional(str)
        vshow = Optional(str)
        vtype = Optional(str)
    return Plat_user_conf
if __name__ == "__main__":
a = Database()
define_user_conf(a)
a.bind("mysql", host="outjhkj01.mysql.rds.aliyuncs.com", port=3306, user="jhkj", passwd="jhkj_jhkj", db="saas_meta")
a.generate_mapping(create_tables=True)
b = Database()
define_user_conf(b)
b.bind("mysql", host="outjhkj01.mysql.rds.aliyuncs.com", port=3306, user="jhkj", passwd="jhkj_jhkj", db="guaengdemo")
a.disconnect()
b.disconnect()
b.generate_mapping(create_tables=True)
# db.drop_table("plat_event")
# tester = Plat_event()
# b = Database()
# setDB(b)
# db.bind("mysql", host="outjhkj01.mysql.rds.aliyuncs.com", port=3306, user="jhkj", passwd="jhkj_jhkj", db="guaengdemo")
# db.generate_mapping(create_tables=True)
# tester = Plat_event() | [
"[email protected]"
] | |
94b48fd60ae2a1848557d45847013a281ca0bb72 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/VoucherAvailableOutItemInfo.py | d0133f8f8df9195637a3cad5457d1610e907a92c | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,440 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class VoucherAvailableOutItemInfo(object):
    """Value object pairing an item's owning app id with its out-facing item id."""

    # Names of the alipay biz fields carried by this model.
    _FIELDS = ('item_app_id', 'out_item_id')

    def __init__(self):
        self._item_app_id = None
        self._out_item_id = None

    @property
    def item_app_id(self):
        return self._item_app_id

    @item_app_id.setter
    def item_app_id(self, value):
        self._item_app_id = value

    @property
    def out_item_id(self):
        return self._out_item_id

    @out_item_id.setter
    def out_item_id(self, value):
        self._out_item_id = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, skipping unset/empty fields.

        Values that expose their own to_alipay_dict() are serialized
        recursively.
        """
        result = {}
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            result[name] = value
        return result

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; returns None for falsy input."""
        if not d:
            return None
        obj = VoucherAvailableOutItemInfo()
        if 'item_app_id' in d:
            obj.item_app_id = d['item_app_id']
        if 'out_item_id' in d:
            obj.out_item_id = d['out_item_id']
        return obj
| [
"[email protected]"
] | |
bcf1581afef31e7569bc8ef68a094cb8fad143ea | 70f5f279e051360310f95be895320d8fa6cd8d93 | /extraPackages/matplotlib-3.0.2/examples/userdemo/connectionstyle_demo.py | 1ea2bf5fe8fd2ff9ac9da4674adfb762654d93bd | [
"BSD-3-Clause"
] | permissive | spacetime314/python3_ios | 4b16ab3e81c31213b3db1e1eb00230621b0a7dc8 | e149f1bc2e50046c8810f83dae7739a8dea939ee | refs/heads/master | 2020-05-09T20:39:14.980041 | 2019-04-08T15:07:53 | 2019-04-08T15:07:53 | 181,415,024 | 2 | 0 | BSD-3-Clause | 2019-04-15T05:00:14 | 2019-04-15T05:00:12 | null | UTF-8 | Python | false | false | 1,845 | py | """
====================
Connectionstyle Demo
====================
"""
import matplotlib.pyplot as plt
# 3x5 grid of demo axes; one connection style is drawn per panel below.
fig, axs = plt.subplots(3, 5, figsize=(8, 4.8))
# NOTE(review): these module-level coordinates are shadowed by the locals in
# demo_con_style and appear to be unused.
x1, y1 = 0.3, 0.3
x2, y2 = 0.7, 0.7
def demo_con_style(ax, connectionstyle, label=None):
    """Draw two points and an arrow between them on *ax* using the given
    matplotlib *connectionstyle* string, and print that string in the
    axes corner. *label* is accepted but unused.
    """
    x1, y1 = 0.3, 0.2
    x2, y2 = 0.8, 0.6
    ax.plot([x1, x2], [y1, y2], ".")
    ax.annotate("",
                xy=(x1, y1), xycoords='data',
                xytext=(x2, y2), textcoords='data',
                arrowprops=dict(arrowstyle="->",
                                color="0.5",
                                shrinkA=5, shrinkB=5,
                                patchA=None,
                                patchB=None,
                                connectionstyle=connectionstyle,
                                ),
                )
    # Show the style string, wrapped at commas for readability.
    ax.text(.05, .95, connectionstyle.replace(",", ",\n"),
            transform=ax.transAxes, ha="left", va="top")
# One panel per connection style family: angle3, arc3, angle, arc, bar.
demo_con_style(axs[0, 0], "angle3,angleA=90,angleB=0")
demo_con_style(axs[1, 0], "angle3,angleA=0,angleB=90")
demo_con_style(axs[0, 1], "arc3,rad=0.")
demo_con_style(axs[1, 1], "arc3,rad=0.3")
demo_con_style(axs[2, 1], "arc3,rad=-0.3")
demo_con_style(axs[0, 2], "angle,angleA=-90,angleB=180,rad=0")
demo_con_style(axs[1, 2], "angle,angleA=-90,angleB=180,rad=5")
demo_con_style(axs[2, 2], "angle,angleA=-90,angleB=10,rad=5")
demo_con_style(axs[0, 3], "arc,angleA=-90,angleB=0,armA=30,armB=30,rad=0")
demo_con_style(axs[1, 3], "arc,angleA=-90,angleB=0,armA=30,armB=30,rad=5")
demo_con_style(axs[2, 3], "arc,angleA=-90,angleB=0,armA=0,armB=40,rad=0")
demo_con_style(axs[0, 4], "bar,fraction=0.3")
demo_con_style(axs[1, 4], "bar,fraction=-0.3")
demo_con_style(axs[2, 4], "bar,angle=180,fraction=-0.2")
for ax in axs.flat:
    # Uniform look: fixed limits/aspect and no ticks on any panel.
    ax.set(xlim=(0, 1), ylim=(0, 1), xticks=[], yticks=[], aspect=1)
fig.tight_layout(pad=0)
plt.show()
| [
"[email protected]"
] | |
7d2794d66c8af7463d80b6feb07f0a139da4daf6 | 6f54ce52f08806075f0445e7dd206baae96ebdca | /IssueTracker/controllers/default.py | f6f0ad38bfb5a2d5fa0f37f28e66d7e27f9f3aff | [
"BSD-3-Clause"
] | permissive | ykanggit/web2py-appliances | a93d318a214aa5b3e5cd6b47b642f2c12addba46 | 5ca7a04d5403f04aad9e90e99e10dbc05a08a50a | refs/heads/master | 2022-05-06T08:55:11.089350 | 2022-04-14T19:25:02 | 2022-04-14T19:25:02 | 49,680,074 | 0 | 0 | null | 2016-01-14T22:41:45 | 2016-01-14T22:41:45 | null | UTF-8 | Python | false | false | 7,707 | py | # -*- coding: utf-8 -*-
def index():
    """Landing page controller: hands a greeting message to the view."""
    greeting = T('Hello World')
    return dict(message=greeting)
def projects():
    """Grid of projects; with an arg, only subprojects of that project.

    Rows are editable/deletable only by the project's creator or manager.
    """
    #COLUMNS=('project.name','project.author','project.repo','project.license')
    FIELDS=(db.project.id,db.project.name,db.project.created_by,db.project.manager,db.project.phase,db.project.repo)
    # Per-row navigation links into the subproject/issue/team grids.
    LINKS=[lambda row: A('Subprojects',_href=URL('projects',args=row.id)),
           lambda row: A('Issues',_href=URL('issues',args=row.id)),
           lambda row: A('Team',_href=URL('teams',args=row.id)) ]
    def check(row): return ((row.created_by == auth.user_id)|(row.manager == auth.user_id))
    if (request.args(0)):
        query = (db.project.super_project==request.args(0))
        #name = 'The subprojects of: '+ str(db(db.project.id==request.args(0)).select(db.project.name)).lstrip('project.name ')
    else:
        query = db.project
        #name = 'Project directory'
    grid = SQLFORM.grid(query,editable=check,deletable=check,
                        fields = FIELDS,links=LINKS)
    return dict(grid=grid)#name=name)
def teams():
    """Grid of teams; with an arg, only teams assigned to that project.

    Rows are editable/deletable only by the team lead.
    """
    def check(row):
        return (row.team_lead == auth.user_id)
    if (request.args(0)):
        query = (db.team.assigned_projects==request.args(0))
    else:
        query = db.team
    grid=SQLFORM.grid(query,editable=check,deletable=check)
    return dict(grid=grid)
@auth.requires_membership('manager')
def roles():
    """Read-only grid of the members of the 'manager' auth group (managers only)."""
    manager_id = db(db.auth_group.role == 'manager').select().first().id
    query = (db.auth_membership.group_id == manager_id)
    grid = SQLFORM.grid(query,editable=False)
    return dict(grid=grid)
def issues():
    """Grid of the latest revision of each issue of a project.

    args(0): project id (required); args(1): optional parent issue id to show
    sub-issues; args(2): status filter ('Open' default, 'Closed', 'All', or a
    concrete status value).
    """
    project = db.project(request.args(0)) or redirect(URL('projects'))
    status = request.args(2)
    #TODO- show issues of the subprojects
    # Only the newest revision of each issue carries is_last=True.
    query = (db.issue.project == project.id)&(db.issue.is_last==True)
    if (request.args(1)):
        query = query&(db.issue.super_issue==request.args(1))
    if not status or status=='Open':
        query = query&(db.issue.status.belongs(['New','Assigned','Accepted','Started']))
    elif status=='Closed':
        query = query&(db.issue.status.belongs(
            ['Fixed','Verified','Invalid','Duplicate','WontFix','Done']))
    elif status!='All':
        query = query&(db.issue.status==status)
    """comment"""
    from gluon.utils import web2py_uuid
    # Defaults for the "create issue" form; DESCRIPTION is presumably a
    # module-level template defined in the models -- confirm.
    db.issue.project.default = project.id
    db.issue.uuid.default = web2py_uuid()
    db.issue.is_last.default = True
    db.issue.owner.default = project.created_by.email
    db.issue.description.default = DESCRIPTION
    db.issue.labels.represent = lambda v,r: ', '.join(v or [])
    # Only the project creator or listed members may set owner/status.
    if not auth.user or not (
        auth.user.id == project.created_by or \
        auth.user.email in (project.members_email or [])):
        db.issue.owner.writable = False
        db.issue.status.writable = False
    FIELDS=(db.issue.id,db.issue.uuid,db.issue.status,db.issue.summary,db.issue.created_on,db.issue.author,db.issue.labels,)
    LINKS=[lambda row: A('Details',_href=URL('issue',args=row.uuid)),
           lambda row: A('Sub-issues',_href=URL('issues',args=[project.id,row.id])),
           lambda row2:A('Assignment',_href=URL('assign',args=row2.id)),
           lambda row3: A('Escalate', _href=URL('escalate',args=row3.id))]
    grid = SQLFORM.grid(query, fields = FIELDS,links=LINKS,
                        details=False,editable=False,
                        deletable=project.created_on==auth.user_id,
                        create=auth.user_id,args=[project.id],
                        oncreate=lambda form:do_mail([db.issue(form.vars.id)]))
    return dict(grid=grid, project=project)
def issue():
    """Detail/comment page for one issue identified by its uuid (args(0)).

    Each comment is stored as a new issue revision; the previous latest
    revision is demoted by clearing is_last.
    """
    last = db(db.issue.uuid==request.args(0))\
        (db.issue.is_last==True).select().first()
    project = db.project(last.project) or redirect(URL('projects'))
    if auth.user:
        # Pre-fill the new revision with the current values.
        db.issue.status.default = last.status
        db.issue.summary.default = last.summary
        db.issue.project.default = last.project
        db.issue.uuid.default = last.uuid
        db.issue.is_last.default = True
        db.issue.owner.default = last.owner
        db.issue.labels.default = last.labels
        # Non-members may comment but not reassign owner or change status.
        if not (auth.user.id == project.created_by or \
                auth.user.email == last.owner or \
                auth.user.email in (project.members_email or [])):
            db.issue.owner.default = project.created_by
            db.issue.owner.writable = False
            db.issue.status.writable = False
        form = SQLFORM(db.issue)
        if form.process().accepted:
            last.update_record(is_last=False)
    else:
        form = DIV('login to comment')
    items = db(db.issue.uuid==request.args(0)).select(
        orderby=db.issue.created_on)
    # Notify watchers only after a successful new revision.
    if isinstance(form,FORM) and form.accepted: do_mail(items)
    return dict(project=project,form=form,items=items,last=last)
@auth.requires_membership('manager')
def assign():
    """Grid of issue assignments (managers only); args(0) filters by issue id.

    assigned_by/assigned_date are stamped automatically and not editable.
    """
    from datetime import datetime
    if (request.args(0)):
        query= (db.issue_assignment.issue==request.args(0))
    else:
        query=(db.issue_assignment)
    FIELDS=(db.issue_assignment.issue,db.issue_assignment.assigned_by,\
            db.issue_assignment.assigned_to,db.issue_assignment.assigned_date)
    db.issue_assignment.assigned_by.default='%(first_name)s %(last_name)s' % auth.user
    db.issue_assignment.assigned_by.writable=False
    db.issue_assignment.assigned_date.default=datetime.now()
    db.issue_assignment.assigned_date.writable=False
    grid=SQLFORM.grid(query)
    return dict(grid=grid)
@auth.requires_membership('manager')
def escalate():
    """Move the issue in args(0) up to its project's parent project (managers only)."""
    issueID=request.args(0)
    # NOTE(review): despite the name, this is the *issue* row, not a project.
    reference_project= db(db.issue.id==issueID).select().first()
    super_proj = db(db.project.id==reference_project.project).select(db.project.super_project).first()
    query = (db.issue.id==issueID)
    if super_proj.super_project == None:
        message = "Already a top level project"
    else:
        db(query).update(project=super_proj.super_project)
        message= "The issue has been escalated"
    session.flash = message
    redirect(URL('projects'))
    return dict()
def user():
    """
    Standard web2py auth controller.

    exposes:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    use @auth.requires_login()
    @auth.requires_membership('group name')
    @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control
    """
    return dict(form=auth())
def download():
    """
    allows downloading of uploaded files
    http://..../[app]/default/download/[filename]
    """
    return response.download(request,db)
def call():
    """
    exposes services. for example:
    http://..../[app]/default/call/jsonrpc
    decorate with @services.jsonrpc the functions to expose
    supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
    """
    return service()
@auth.requires_signature()
def data():
    """
    Generic signed CRUD endpoint.

    http://..../[app]/default/data/tables
    http://..../[app]/default/data/create/[table]
    http://..../[app]/default/data/read/[table]/[id]
    http://..../[app]/default/data/update/[table]/[id]
    http://..../[app]/default/data/delete/[table]/[id]
    http://..../[app]/default/data/select/[table]
    http://..../[app]/default/data/search/[table]
    but URLs must be signed, i.e. linked with
    A('table',_href=URL('data/tables',user_signature=True))
    or with the signed load operator
    LOAD('default','data.load',args='tables',ajax=True,user_signature=True)
    """
    return dict(form=crud())
| [
"[email protected]"
] | |
52327f791bad53af1e5f123f7f1b3f296bffe0bb | dc940e2aa628eff693af36584cfad935990ebe7d | /v3.1.0/tool/SaveBookInfoToMySqlTool.py | c32721874dd569c804662a6f57f96fbcb50f3b77 | [] | no_license | 520wsl/getXs8Novels | 865572ea488e0bf3d4e21664eb576237b6dd18be | ecf6d0bc5dfdbe4b5c3e8a9aac313bf7abce614b | refs/heads/master | 2020-04-18T00:59:56.777416 | 2019-02-15T08:52:11 | 2019-02-15T08:52:11 | 167,101,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,620 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = '书籍数据存储工具类'
__author__ = 'Mad Dragon'
__mtime__ = '2019/1/24'
# 我不懂什么叫年少轻狂,只知道胜者为王
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import time
import moment
from tool.GetBookInfoTool import GetBookInfoTool
from public.DataTool import DataTool
from public.Logger import Logger
from public.MySqlTool import MySqlTool
class SaveBookInfoToMySqlToo():
    """Persists scraped book chapter text into MySQL (table `links`) via upsert.

    NOTE(review): the class name is missing the final 'l' of "Tool"; kept
    as-is because callers reference it by this name.
    """
    def __init__(self, second, logger, getBookInfoToo, mySql, dataToo):
        # second: throttle delay between requests; m_saveText: upsert keyed on
        # url -- on duplicate it refreshes the article and bumps `nex`.
        self.b_second = second
        self.m_saveText = "INSERT INTO `links` (`url`,article) VALUES (%s, %s) ON DUPLICATE KEY UPDATE article = VALUES (article), nex = nex+1"
        self.getBookInfoToo = getBookInfoToo
        self.dataToo = dataToo
        self.mySql = mySql
        self.logger = logger
    def saveText(self, link):
        # Throttle, fetch chapter text, then upsert it; False when fetch is empty.
        time.sleep(self.b_second)
        content = self.getBookInfoToo.getTxtInfo(link)
        if len(content) <= 0: return False
        self.logger.debug('书籍 [ %s ] 文章存储' % (link))
        return self.mySql.batchAdd(sql=self.m_saveText, data_info=[(link, content)])
    def saveCatalog(self,bookId):
        # Fetch the catalog JSON for the book and log it (no persistence yet).
        jsonData = self.getBookInfoToo.getCatalogInfo(bookId=bookId)
        self.logger.debug(jsonData)
if __name__ == '__main__':
    # Smoke-test wiring: build the logging/data/db helpers and the saver by hand.
    b_title = 'GetBookInfoToo'
    b_second = 1
    b_timeStr = moment.now().format('YYYY-MM-DD-HH-mm-ss')
    dataToo = DataTool(logName=b_title, second=b_second, timeStr=b_timeStr)
    logger = Logger(logname=dataToo.initLogName(), loglevel=1, logger=b_title).getlog()
    mySql = MySqlTool(logName=dataToo.initLogName())
    getBookInfoToo = GetBookInfoTool(second=b_second, dataToo=dataToo, logger=logger)
    saveBookInfoToMySqlToo = SaveBookInfoToMySqlToo(second=b_second, logger=logger,
                                                    getBookInfoToo=getBookInfoToo,
                                                    mySql=mySql, dataToo=dataToo)
| [
"[email protected]"
] | |
2e4d4ad192fac1e61c9f8874b8b0b4a41791f5d5 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_4/Areseye/D_a.py | e133bb845631ab5660737f81985ed9f3e3c0f065 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 585 | py | #encoding:utf8
import os
import pdb
def solve(K, C, S):
    """Return K starting positions, beginning at 1 and spaced K**(C-1) apart.

    S is accepted for signature compatibility with the input format but is
    not used by this solution.
    """
    step = K ** (C - 1)
    return [1 + i * step for i in range(K)]
if __name__ == '__main__':
with open('d.in','r') as fin:
for ind,line in enumerate(fin):
if ind is 0:
T = int(line)
else:
strnum = line.split(' ')
param = map(int,strnum)
res = solve(*param)
resstr = map(str,res)
print 'Case #{}: {}'.format(ind,' '.join(resstr))
| [
"[[email protected]]"
] | |
b4f6555d72c6cacb9fa6eab225aff4ab94ddd2b0 | 77d93431ca903d7f97d0eaa1b46a98fc1b372f33 | /yugires/yugiscrapper.py | 5963d5449b223deede8380f9c5ca07ffd6a40b3c | [
"MIT"
] | permissive | DanielLSM/yugioh-high-res | 160ce52b8e0959add9d82b0595aa3f64ccc24689 | bc0cb2149f967fee46f58bdeed8ea089214f2290 | refs/heads/main | 2023-06-06T02:23:32.827861 | 2021-06-29T16:07:54 | 2021-06-29T16:07:54 | 381,405,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | from tools impor
# REST endpoint of the YGOPRODeck card-database API (v7 "cardinfo" route).
# NOTE(review): the import line above is truncated ("from tools impor") in
# this copy and is a SyntaxError as-is -- confirm the intended import
# against tools.py.
database_endpoint = 'https://db.ygoprodeck.com/api/v7/cardinfo.php'
| [
"[email protected]"
] | |
6b688c2274d062b107eef215f2f6857853970569 | 1333357d463006bb6540fb6f68f140c383d4e676 | /data/data_clean.py | 3bcfe42405065c6c3c2b46f2e42f29c6764ba91d | [] | no_license | markWJJ/classifynetwork | ced1ff5eaa9e1c7e9e6440e08e6744070689a305 | d65f22486434fdfbdce38d063e176eb31c5d7354 | refs/heads/master | 2023-01-09T03:12:01.540254 | 2018-09-17T07:50:02 | 2018-09-17T07:50:02 | 149,088,397 | 0 | 1 | null | 2022-12-21T03:34:27 | 2018-09-17T07:48:54 | Python | UTF-8 | Python | false | false | 8,090 | py | # -*- coding: UTF-8 -*-
import re
from collections import OrderedDict
import jieba
import codecs
from hanziconv import HanziConv
import os
import string
import json
import jieba.posseg as pseg
import numpy as np
# Full-width -> half-width digit map (keys are fullwidth chars U+FF10..U+FF19).
FH_NUM = (
    (u"0", u"0"), (u"1", u"1"), (u"2", u"2"), (u"3", u"3"), (u"4", u"4"),
    (u"5", u"5"), (u"6", u"6"), (u"7", u"7"), (u"8", u"8"), (u"9", u"9"),
)
FH_NUM = dict(FH_NUM)
# Full-width -> half-width latin letters, both cases.
FH_ALPHA = (
    (u"a", u"a"), (u"b", u"b"), (u"c", u"c"), (u"d", u"d"), (u"e", u"e"),
    (u"f", u"f"), (u"g", u"g"), (u"h", u"h"), (u"i", u"i"), (u"j", u"j"),
    (u"k", u"k"), (u"l", u"l"), (u"m", u"m"), (u"n", u"n"), (u"o", u"o"),
    (u"p", u"p"), (u"q", u"q"), (u"r", u"r"), (u"s", u"s"), (u"t", u"t"),
    (u"u", u"u"), (u"v", u"v"), (u"w", u"w"), (u"x", u"x"), (u"y", u"y"), (u"z", u"z"),
    (u"A", u"A"), (u"B", u"B"), (u"C", u"C"), (u"D", u"D"), (u"E", u"E"),
    (u"F", u"F"), (u"G", u"G"), (u"H", u"H"), (u"I", u"I"), (u"J", u"J"),
    (u"K", u"K"), (u"L", u"L"), (u"M", u"M"), (u"N", u"N"), (u"O", u"O"),
    (u"P", u"P"), (u"Q", u"Q"), (u"R", u"R"), (u"S", u"S"), (u"T", u"T"),
    (u"U", u"U"), (u"V", u"V"), (u"W", u"W"), (u"X", u"X"), (u"Y", u"Y"), (u"Z", u"Z"),
)
FH_ALPHA = dict(FH_ALPHA)
# Chinese numerals -> arabic digit strings (used per-character by full2half).
NUM = (
    (u"一", "1"), (u"二" ,"2"), (u"三", "3"), (u"四", "4"), (u"五", "5"), (u"六", "6"), (u"七", "7"),
    (u"八", "8"), (u"九", "9"), (u"零", "0"), (u"十", "10")
)
NUM = dict(NUM)
# Regex character classes of CJK / ASCII punctuation stripped by remove_symbol.
# FIX(review): the first character inside the class had been garbled into a
# bare '"' (which would terminate the string literal); restored as the
# fullwidth opening quote -- confirm against the original source.
CH_PUNCTUATION = u"[“#$%&',:;@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·!?。。]"
EN_PUNCTUATION = u"['!#$%&\'()*+,-/:;<=>?@[\\]^_`{|}~']"
# Courtesy phrases (greetings, "please", "thanks") removed by subtoken before
# intent matching.
# FIX(review): the last key had been garbled to u'"""'; restored as a stray
# fullwidth quote mapped to the empty string -- confirm against the original.
sub_dicit = {u"老师好": u"",
             u"老师": u"", u"你好": u"", u"您好": u"",
             u"请问": u"", u"请": u"", u"谢谢": u"",
             u"“": u""}
class DataCleaner(object):
    """Chinese text normalizer: case folding, traditional->simplified,
    full->half width, synonym replacement, stop-word and punctuation removal.

    params_path is a dict of optional file paths: "stop_word", "synthom_path"
    (synonym pairs, one "<from> <to>" per line) and "non_words".
    """
    def __init__(self, params_path):
        self.params_path = params_path
        self.read_word()
        self.read_synonym_word()
        self.read_non_words()
    def read_non_words(self):
        # Loads negation/"non" words; self.non_word stays None when the file
        # is absent.
        word_path = self.params_path.get("non_words", "")
        print("----non word path----", word_path)
        if os.path.exists(word_path):
            with codecs.open(word_path, "r", "utf-8") as f:
                self.non_word = f.read().splitlines()
        else:
            self.non_word = None
        print(self.non_word,"----non word----")
    def calculate_non_word(self, input_string):
        # Parity of negation words in the segmented input: 1 for an odd
        # count (net negation), else 0.
        non_cnt = 0
        if self.non_word:
            word_cut = list(jieba.cut(input_string))
            for word in self.non_word:
                if word in word_cut:
                    non_cnt += 1
        if np.mod(non_cnt, 2) == 0:
            return 0
        else:
            return 1
    def synthom_replacement(self, input_string):
        # Replace each segmented word by its canonical synonym.
        # NOTE(review): normalized_word_list aliases cut_word_list (no copy);
        # harmless here but easy to trip over when editing.
        cut_word_list = list(jieba.cut(input_string))
        normalized_word_list = cut_word_list
        for index, word in enumerate(cut_word_list):
            if word in self.synonym_dict:
                normalized_word_list[index] = self.synonym_dict[word]
        return "".join(normalized_word_list)
    def remove_stop_word(self, input_string):
        # Drop stop words after segmentation and re-join.
        cut_word_list = list(jieba.cut(input_string))
        normalized_word_list = []
        for word in cut_word_list:
            if word in self.stop_word:
                continue
            else:
                normalized_word_list.append(word)
        return "".join(normalized_word_list)
    def remove_symbol(self, input_string):
        # Strip CJK punctuation first, then ASCII punctuation.
        cn_text = re.sub(CH_PUNCTUATION, "", input_string)
        en_text = re.sub(EN_PUNCTUATION, "", cn_text)
        return en_text
    def poc_clean(self, input_string):
        """Full normalization pipeline; falls back to the previous stage's
        text whenever a step would empty the string."""
        tmp = self.upper2lower(input_string)
        tmp = self.tra2sim(tmp)
        tmp = self.full2half(tmp)
        if self.synonym_dict:
            tmp = self.synthom_replacement(tmp)
        if self.stop_word:
            nonstop_text = self.remove_stop_word(tmp)
            if len(nonstop_text) >= 1:
                tmp = nonstop_text
        non_symbol_text = self.remove_symbol(tmp)
        if len(non_symbol_text) >= 1:
            tmp = non_symbol_text
        # Keep only CJK ideographs, digits and ASCII letters.
        char_pattern = re.compile(u"[\u4e00-\u9fa5,0-9,a-z,A-Z]+")
        tmp = "".join(char_pattern.findall(tmp))
        output = ""
        for token in tmp:
            if len(token) >= 1:
                output += token
        return output
    def clean(self, input_string):
        # Lightweight variant: case fold + simplify + width-normalize only.
        tmp = self.upper2lower(input_string)
        tmp = self.tra2sim(tmp)
        tmp = self.full2half(tmp)
        return tmp
    def read_word(self):
        # Loads stop words; self.stop_word stays None when the file is absent.
        word_path = self.params_path.get("stop_word", "")
        if os.path.exists(word_path):
            with codecs.open(word_path, "r", "utf-8") as f:
                self.stop_word = f.read().splitlines()
        else:
            # NOTE(review): .format() here has no placeholder in the string,
            # so word_path is never printed; message also has a typo.
            print("not exiting params_path".format(word_path))
            self.stop_word = None
    def read_synonym_word(self):
        # Builds {variant: canonical} from whitespace-separated pairs.
        self.synonym_dict = {}
        synonym_path = self.params_path.get("synthom_path", "")
        if os.path.exists(synonym_path):
            with codecs.open(synonym_path, "r", "utf-8") as f:
                data = f.read().splitlines()
                for item in data:
                    content = item.split()
                    self.synonym_dict[content[0]] = content[1]
                    print(content[0], content[1])
        else:
            self.synonym_dict = None
    def synonym_word_mapping(self):
        # Inverts synonym_dict character-wise into synonym2standard.
        # NOTE(review): iterating self.synonym_dict[key] walks the *characters*
        # of the value string -- confirm that is intended.
        self.synonym2standard = OrderedDict()
        for key in self.synonym_dict:
            for item in self.synonym_dict[key]:
                self.synonym2standard[item] = key
    def upper2lower(self, input_string):
        return input_string.lower()
    def subtoken(self, input_string):
        # Remove courtesy phrases listed in module-level sub_dicit.
        tmp_string = input_string
        for key in sub_dicit:
            tmp_string = re.sub(key, sub_dicit[key], tmp_string)
        return tmp_string
    def lower2upper(self, input_string):
        return input_string.upper()
    def replace_phrase(input_string, phrase_dict):
        # NOTE(review): missing `self` -- as written the first positional
        # argument received is the instance; calls via an instance will
        # misbehave. Confirm whether this was meant to be a staticmethod.
        s = input_string
        for key in phrase_dict.keys():
            s = re.sub(key, phrase_dict[key], s)
        return s
    def tra2sim(self, input_string):
        # Traditional -> simplified Chinese via HanziConv.
        s = HanziConv.toSimplified(input_string)
        return s
    def full2half(self, input_string):
        # Per-character width/numeral normalization.
        # NOTE(review): these are independent `if`s, so the final `if/else`
        # overwrites any FH_NUM/FH_ALPHA match with the raw character --
        # fullwidth digits/letters are never actually converted. Probably
        # meant to be an elif chain; confirm before relying on this.
        s = ""
        for uchar in input_string:
            if uchar in FH_NUM:
                half_char = FH_NUM[uchar]
            if uchar in FH_ALPHA:
                half_char = FH_ALPHA[uchar]
            if uchar in NUM:
                half_char = NUM[uchar]
            else:
                half_char = uchar
            s += half_char
        return s
    def detect_en(self, input_string,
                  en_pattern=re.compile(u'[\u4e00-\u9fa5]'),
                  alphabet_pattern=re.compile(u"[a-cA-C]")):
        # Splits on Chinese characters and collects the non-Chinese runs;
        # single chars are kept only when they look like a-c choice letters.
        # NOTE(review): .decode("utf-8") implies Python 2 byte-string input;
        # under Python 3 this raises AttributeError on str.
        s = []
        for var in en_pattern.split(input_string.decode("utf-8")):
            if len(var) > 1:
                """
                if len(var) >= 1 it is a word or sentence
                """
                s.append(var)
            elif len(var) == 1:
                """
                if len(var) == 1 it may be a alphabet and usually it is a choice for a given question
                """
                tmp_var = alphabet_pattern.findall(var)
                if len(tmp_var) == 1:
                    s.append(self.upper2lower(var))
        return s
    def detect_ch(self, input_string, ch_pattern = re.compile(u"[\u4e00-\u9fa5]+")):
        # Extracts the Chinese runs and joins them with single spaces.
        s = ch_pattern.findall(input_string.decode("utf-8"))
        s = " ".join(s)
        return s
    def sentence_segmentation(self, input_string, symbol_pattern=re.compile(CH_PUNCTUATION)):
        """
        based on CH_PUNCTUATION to segment sentence
        """
        return symbol_pattern.split(input_string.decode("utf-8"))
"[email protected]"
] | |
ac9bc264069f3b02a22624cafb6308e8ec8ae4bf | 79e19819aec49b500825f82a7de149eb6a0ba81d | /leetcode/303.py | f778311ff580a2a44b295e3a1440ef7bab29626f | [] | no_license | seoyeonhwng/algorithm | 635e5dc4a2e9e1c50dc0c75d9a2a334110bb8e26 | 90406ee75de69996e666ea505ff5d9045c2ad941 | refs/heads/master | 2023-05-03T16:51:48.454619 | 2021-05-26T00:54:40 | 2021-05-26T00:54:40 | 297,548,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | class NumArray:
    def __init__(self, nums: List[int]):
        # Keep the raw array and eagerly build the prefix-sum cache.
        # For an empty input memo() is skipped, so self.dp may not exist.
        self.nums = nums
        if nums:
            self.memo()
    def memo(self):
        # dp[i] holds the inclusive prefix sum nums[0] + ... + nums[i].
        self.dp = [0] * len(self.nums)
        self.dp[0] = self.nums[0]
        for i in range(1, len(self.nums)):
            self.dp[i] = self.dp[i-1] + self.nums[i]
    def sumRange(self, i: int, j: int) -> int:
        # Inclusive range sum from the prefix table; at i == 0 there is no
        # preceding prefix to subtract.
        return self.dp[j] - self.dp[i-1] if i > 0 else self.dp[j]
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j) | [
"[email protected]"
] | |
723c8b2001a43c9aa112cd5eba3a02f98544b6f5 | 58ade65dffc7cbe103d93d7c769096a20d9f9815 | /src/smach_based_introspection_framework/online_part/data_collection/StoreVectorToRedisProc.py | d94e5e8102adc6af5a29654d7877be2d0b34a276 | [
"BSD-3-Clause"
] | permissive | birlrobotics/smach_based_introspection_framework | 2cff69ecec030a5b5046dea99f9e15105f52361b | f16742339cddfc86effba4dbf6e5062304704b89 | refs/heads/master | 2021-05-09T12:02:26.946473 | 2019-05-29T02:46:47 | 2019-05-29T02:46:47 | 119,001,821 | 7 | 1 | null | 2018-07-05T04:58:40 | 2018-01-26T03:37:58 | Python | UTF-8 | Python | false | false | 1,512 | py | import multiprocessing
from ConvertTagTopicToInterestedVectorProc import (
data_frame_idx,
smach_state_idx,
data_header_idx,
)
class StoreVectorToRedisProc(multiprocessing.Process):
    """Worker process that drains interested-vector tuples from a queue and
    stores them into the Redis sorted set "tag_multimodal_msgs", scored by
    the message timestamp."""
    def __init__(
        self,
        com_queue,
        node_name="StoreVectorToRedisProc_node",
    ):
        # com_queue carries tuples indexed by the *_idx constants imported
        # from ConvertTagTopicToInterestedVectorProc.
        multiprocessing.Process.__init__(self)
        self.com_queue = com_queue
        self.node_name = node_name
    def run(self):
        # ROS must be (re)initialized inside the child process.
        import rospy
        rospy.init_node(self.node_name, anonymous=True)
        try:
            import redis
            import Queue  # Python 2 queue module; needed only for Queue.Empty
            r = redis.Redis(host='localhost', port=6379, db=0)
            # Start from a clean sorted set on each run.
            rospy.loginfo('delete key \"tag_multimodal_msgs\": %s'%r.delete("tag_multimodal_msgs"))
            while not rospy.is_shutdown():
                try:
                    latest_data_tuple = self.com_queue.get(timeout=1)
                except Queue.Empty:
                    continue
                except KeyboardInterrupt:
                    break
                data_frame = latest_data_tuple[data_frame_idx]
                smach_state = latest_data_tuple[smach_state_idx]  # unpacked but unused here
                data_header = latest_data_tuple[data_header_idx]
                # Score by the ROS header timestamp so entries stay time-ordered.
                score = data_header.stamp.to_sec()
                value = data_frame
                # NOTE(review): this is the redis-py 2.x zadd(name, value, score)
                # argument order; redis-py 3.x requires zadd(name, {value: score}).
                r.zadd("tag_multimodal_msgs", value, score)
        except Exception as e:
            rospy.logerr("StoreVectorToRedisProc error: %s"%e)
        rospy.loginfo("StoreVectorToRedisProc exits")
| [
"[email protected]"
] | |
7b5ebbb6b02299b7f47b6077cba156000ceeb9c3 | 8efe9a6c9489d798b5f5b610eb531d86924a1548 | /src/wix/urls.py | c74a0f134076e607c3999dbed8538b6643de2a2f | [] | no_license | MarekBiczysko/naklisze_public | e8e6f7e61cdb83b74ea68862b40c061c0253767b | e53c0e8fefffbcfc3a8859976eb7b81cf6270847 | refs/heads/master | 2022-12-12T02:27:09.824803 | 2019-07-23T10:54:47 | 2019-07-23T10:54:47 | 198,410,666 | 0 | 0 | null | 2022-12-08T01:03:08 | 2019-07-23T10:46:57 | Python | UTF-8 | Python | false | false | 288 | py | from django.views.generic import RedirectView
from django.conf.urls import url
from .views import wix_page
urlpatterns = [
# url(r'^$', RedirectView.as_view(url='https://biczysko.wix.com/foto')),
url(r'^$', wix_page, name='wix'),
url(r'^', RedirectView.as_view(url='/')),
]
| [
"[email protected]"
] | |
61abe84b1c8861332157ee57244832fe731b1498 | f9bcdd8fe51e94b884752574229bc592a84be6bd | /python/315_Count_of_Smaller_Numbers_After_Self.py | 33e899cb414d1e0faa68834085f58c7d725813e5 | [] | no_license | HankerZheng/LeetCode-Problems | cf46a24444cfc3e6bcff38c10a5bb5945e410b5b | d308e0e41c288f23a846b8505e572943d30b1392 | refs/heads/master | 2021-01-12T17:49:40.072069 | 2017-08-17T04:37:20 | 2017-08-17T04:37:20 | 69,397,987 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | # You are given an integer array nums and you have to return a new counts array. The counts array has the property where counts[i] is the number of smaller elements to the right of nums[i].
# Example:
# Given nums = [5, 2, 6, 1]
# To the right of 5 there are 2 smaller elements (2 and 1).
# To the right of 2 there is only 1 smaller element (1).
# To the right of 6 there is 1 smaller element (1).
# To the right of 1 there is 0 smaller element.
# Return the array [2, 1, 1, 0].
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.smallerCnt = 0
self.selfCnt = 1
def insert(self, val):
cnt = 0
tmp = self
while tmp:
if val < tmp.val:
tmp.smallerCnt += 1
if not tmp.left:
tmp.left = TreeNode(val)
break
tmp = tmp.left
elif val > tmp.val:
cnt += tmp.smallerCnt + tmp.selfCnt
if not tmp.right:
tmp.right = TreeNode(val)
break
tmp = tmp.right
else:
tmp.selfCnt += 1
cnt += tmp.smallerCnt
break
return cnt
class Solution(object):
def countSmaller(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
if len(nums) <= 1: return [0] * len(nums)
ans = [0]
dataTree = TreeNode(nums[-1])
for num in nums[-2::-1]:
ans.insert(0,dataTree.insert(num))
return ans | [
"[email protected]"
] | |
688bac0891c7135030e8bf35d07f7a9518baae31 | c5d87c7f25e3fe9b17c1e88993b0ed6831e52acb | /Socket/GPS_Server_Test/GPS_Server_testData.py | 2d6e0b37aa0f879b89e87aa831bf512762c6fe1c | [] | no_license | GIS90/python_base_use | e55d55f9df505dac45ddd332fb65dcd08e8e531f | 7166ca85975bb7c56a5fbb6b723fd8300c4dd5d1 | refs/heads/master | 2020-04-02T08:33:49.461307 | 2018-10-23T03:33:41 | 2018-10-23T03:33:41 | 154,249,857 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,941 | py | # -*- coding: utf-8 -*-
"""
------------------------------------------------
describe:
this tool be used to
------------------------------------------------
"""
import SocketServer
import codecs
import datetime
import os
import threading
from SocketServer import BaseRequestHandler
SOCKET_DATA_MAX = 16 * 1024 * 1024
FORMMAT = "%Y%m%d%H%M%S"
def __get_cur_folder():
# if getattr(sys, "frozen", False):
return os.path.dirname(os.path.abspath(__file__))
# else:
# cur_folder = os.path.dirname(inspect.getfile(inspect.currentframe()))
# return os.path.abspath(cur_folder)
_cur_folder = __get_cur_folder()
_gps_file_folder = os.path.abspath(os.path.join(_cur_folder, "liveGPS"))
if not os.path.exists(_gps_file_folder):
os.makedirs(_gps_file_folder)
class TCPRequestHandler(BaseRequestHandler):
"""
The RequestHandler class for my server.
It is instantiated once per connection to the server, and must
override the handle method to implement communication to the
client.
"""
def setup(self):
BaseRequestHandler.setup(self)
def handle(self):
while True:
try:
data = self.request.recv(SOCKET_DATA_MAX).strip()
if data:
print data
gps_file_name = "gps.dat"
gps_file = os.path.join(_gps_file_folder, gps_file_name)
gps = codecs.open(gps_file, 'wb', 'utf-8')
gps.write(data)
gps.close()
except Exception as e:
print e.message
if __name__ == "__main__":
host = ""
port = 1991
addr = (host, port)
print "Server start ......"
# It use to
server = SocketServer.ThreadingTCPServer(addr, TCPRequestHandler)
server.allow_reuse_address = True
server.serve_forever()
| [
"[email protected]"
] | |
7e45a200414423d396becba56436abd46f1d731e | 66862c422fda8b0de8c4a6f9d24eced028805283 | /slambook2/3rdparty/opencv-3.3.0/samples/python/floodfill.py | 1b988d3763ef61c3f84e1e5039da4e6540f9914f | [
"MIT",
"BSD-3-Clause"
] | permissive | zhh2005757/slambook2_in_Docker | 57ed4af958b730e6f767cd202717e28144107cdb | f0e71327d196cdad3b3c10d96eacdf95240d528b | refs/heads/main | 2023-09-01T03:26:37.542232 | 2021-10-27T11:45:47 | 2021-10-27T11:45:47 | 416,666,234 | 17 | 6 | MIT | 2021-10-13T09:51:00 | 2021-10-13T09:12:15 | null | UTF-8 | Python | false | false | 2,007 | py | #!/usr/bin/env python
'''
Floodfill sample.
Usage:
floodfill.py [<image>]
Click on the image to set seed point
Keys:
f - toggle floating range
c - toggle 4/8 connectivity
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
if __name__ == '__main__':
import sys
try:
fn = sys.argv[1]
except:
fn = '../data/fruits.jpg'
print(__doc__)
img = cv2.imread(fn, True)
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)
h, w = img.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
seed_pt = None
fixed_range = True
connectivity = 4
def update(dummy=None):
if seed_pt is None:
cv2.imshow('floodfill', img)
return
flooded = img.copy()
mask[:] = 0
lo = cv2.getTrackbarPos('lo', 'floodfill')
hi = cv2.getTrackbarPos('hi', 'floodfill')
flags = connectivity
if fixed_range:
flags |= cv2.FLOODFILL_FIXED_RANGE
cv2.floodFill(flooded, mask, seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3, flags)
cv2.circle(flooded, seed_pt, 2, (0, 0, 255), -1)
cv2.imshow('floodfill', flooded)
def onmouse(event, x, y, flags, param):
global seed_pt
if flags & cv2.EVENT_FLAG_LBUTTON:
seed_pt = x, y
update()
update()
cv2.setMouseCallback('floodfill', onmouse)
cv2.createTrackbar('lo', 'floodfill', 20, 255, update)
cv2.createTrackbar('hi', 'floodfill', 20, 255, update)
while True:
ch = cv2.waitKey()
if ch == 27:
break
if ch == ord('f'):
fixed_range = not fixed_range
print('using %s range' % ('floating', 'fixed')[fixed_range])
update()
if ch == ord('c'):
connectivity = 12-connectivity
print('connectivity =', connectivity)
update()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
fe547b0d6f92c919781366e3a1059ab975ea9b14 | 725abfa74e3800622837e60615dc15c6e91442c0 | /venv/Lib/site-packages/django/contrib/messages/storage/session.py | 7dbd24a8da5c105a8955f5695fe53d22b05df70b | [] | no_license | Malak-Abdallah/TODOlist | 4840e2e0a27e6499ae6b37524bb3e58455d08bfb | fd35754e8eac9b262fae17ec16ad9fb510a12f5d | refs/heads/master | 2023-07-16T11:38:48.759232 | 2021-08-31T09:43:11 | 2021-08-31T09:43:11 | 401,600,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,669 | py | import json
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import MessageDecoder, MessageEncoder
class SessionStorage(BaseStorage):
"""
Store messages in the session (that is, django.contrib.sessions).
"""
session_key = "_messages"
def __init__(self, request, *args, **kwargs):
assert hasattr(request, "session"), (
"The session-based temporary "
"message storage requires session middleware to be installed, "
"and come before the message middleware in the "
"MIDDLEWARE list."
)
super().__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieve a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
"""
return (
self.deserialize_messages(self.request.session.get(self.session_key)),
True,
)
def _store(self, messages, response, *args, **kwargs):
"""
Store a list of messages to the request's session.
"""
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return []
def serialize_messages(self, messages):
encoder = MessageEncoder()
return encoder.encode(messages)
def deserialize_messages(self, data):
if data and isinstance(data, str):
return json.loads(data, cls=MessageDecoder)
return data
| [
"[email protected]"
] | |
8770db87586708d0a54dd67e1a2975ec6317d52b | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2772/8317/301956.py | b1c4c2bd1a959d947400f88d06dd30c9659cc1b4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | def solve():
num = int(input())
for _ in range(num):
n = int(input())
print(pow(n, 1/3))
solve() | [
"[email protected]"
] | |
0c49b984bf9f2ac8bae5046c1f435df4c90cd46f | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SSD/mmdet/models/builder.py | 05efb838ed26ce7d0c12f1cdf8a678b15d583bdd | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,225 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mmcv.utils import Registry, build_from_cfg
from torch import nn
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
ROI_EXTRACTORS = Registry('roi_extractor')
SHARED_HEADS = Registry('shared_head')
HEADS = Registry('head')
LOSSES = Registry('loss')
DETECTORS = Registry('detector')
def build(cfg, registry, default_args=None):
"""Build a module.
Args:
cfg (dict, list[dict]): The config of modules, is is either a dict
or a list of configs.
registry (:obj:`Registry`): A registry the module belongs to.
default_args (dict, optional): Default arguments to build the module.
Defaults to None.
Returns:
nn.Module: A built nn module.
"""
if isinstance(cfg, list):
modules = [
build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
]
return nn.Sequential(*modules)
else:
return build_from_cfg(cfg, registry, default_args)
def build_backbone(cfg):
"""Build backbone."""
return build(cfg, BACKBONES)
def build_neck(cfg):
"""Build neck."""
return build(cfg, NECKS)
def build_roi_extractor(cfg):
"""Build roi extractor."""
return build(cfg, ROI_EXTRACTORS)
def build_shared_head(cfg):
"""Build shared head."""
return build(cfg, SHARED_HEADS)
def build_head(cfg):
"""Build head."""
return build(cfg, HEADS)
def build_loss(cfg):
"""Build loss."""
return build(cfg, LOSSES)
def build_detector(cfg, train_cfg=None, test_cfg=None):
"""Build detector."""
return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
| [
"[email protected]"
] | |
de953e1a133d796d7c348777274fe9a4eb25f67e | ddb7916c3962713471044f03bd76414581dbf801 | /Myadmin/templatetags/get_table_rela_name.py | 57099dd00e91e49ac1775475fd5f2fe0ad581a24 | [] | no_license | so1so2so/SuperCrm | 92949819ea2200edd818bfafce8fd2c5ca99076a | ba17faa55b13a611fc579006994af6f0f836764b | refs/heads/master | 2020-03-06T18:24:11.238838 | 2018-05-08T13:42:27 | 2018-05-08T13:42:27 | 127,006,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,845 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def get_rela_name(table_obj):
table_name = table_obj.model._meta.verbose_name_plural or table_obj.verbose_name
if not table_name:
table_name = table_obj.model._meta.model_mame
return table_name
@register.simple_tag
def get_chinese_name(table_obj):
if hasattr(table_obj._meta, 'verbose_name_plural'):
return table_obj._meta.verbose_name_plural
elif hasattr(table_obj._meta, 'verbose_name'):
return table_obj._meta.verbose_name
else:
return table_obj._meta.model_mame
@register.simple_tag
def build_table_row(request, one_obj_django, obj_all_model_and_display):
row_ele = ""
for index, filed in enumerate(obj_all_model_and_display.list_display):
field_obj = one_obj_django._meta.get_field(filed)
if field_obj.choices: # choices type
column_data = getattr(one_obj_django, "get_%s_display" % filed)()
else:
column_data = getattr(one_obj_django, filed)
if type(column_data).__name__ == 'datetime':
column_data = column_data.strftime("%Y-%m-%d %H:%M:%S")
if type(field_obj).__name__ == "ManyToManyField":
all_date = getattr(field_obj, 'get_choices')()[1:]
for choice_item in all_date:
if str(choice_item[0]) == one_obj_django:
pass
if index == 0: # add <a></a> tag
column_data = "<a href='{request_path}/{obj_id}/change' target='_self'>{date}</a>".format(
request_path=request.path,
obj_id=one_obj_django.id,
date=column_data,
)
row_ele += "<td>%s</td>" % column_data
# print row_ele
return mark_safe(row_ele)
@register.simple_tag
def render_page_ele(loop_counter, query_sets, filter_condtions, order, search):
filters = ''
for k, v in filter_condtions.items():
filters += "&%s=%s" % (k, v)
if not order:
order = ''
if not search:
search = ''
if loop_counter < 3 or loop_counter > query_sets.paginator.num_pages - 2: # 显示前2页,或者最后2页
ele_class = ""
if query_sets.number == loop_counter:
ele_class = "active"
ele = '''<li class="%s"><a href="?page=%s%s&o=%s&q=%s">%s</a></li>''' % (
ele_class, loop_counter, filters, order, search, loop_counter)
return mark_safe(ele)
if abs(query_sets.number - loop_counter) <= 1:
ele_class = ""
if query_sets.number == loop_counter:
ele_class = "active"
ele = '''<li class="%s"><a href="?page=%s%s">%s</a></li>''' % (ele_class, loop_counter, filters, loop_counter)
return mark_safe(ele)
return ''
@register.simple_tag
def render_filter_ele(condtion, obj_all_model_and_display, filter_condtions):
select_ele = '''<select class="form-control" name='%s' ><option value=''>----</option>''' % condtion
# 拿到每一个需要filter的值
field_obj = obj_all_model_and_display.model._meta.get_field(condtion)
if field_obj.choices:
selected = ''
# 这个循环会循环所有的choices ((0, '已报名'), (1, '未报名'), (2, '已退学'), (3, '其他'))
for choice_item in field_obj.choices:
# 判断filter_condtions这个字典 {u'source': u'1', u'consultant': u'2'}
# print("choice", choice_item, filter_condtions.get(condtion), type(filter_condtions.get(condtion)))
# 如果前端传递来的值的
if filter_condtions.get(condtion) == str(choice_item[0]):
selected = "selected"
select_ele += '''<option value='%s' %s>%s</option>''' % (choice_item[0], selected, choice_item[1])
selected = ''
if type(field_obj).__name__ == "ForeignKey":
selected = ''
for choice_item in field_obj.get_choices()[1:]:
if filter_condtions.get(condtion) == str(choice_item[0]):
selected = "selected"
select_ele += '''<option value='%s' %s>%s</option>''' % (choice_item[0], selected, choice_item[1])
selected = ''
if type(field_obj).__name__ == "ManyToManyField":
selected = ''
for choice_item in field_obj.get_choices()[1:]:
# print filter_condtions.get(condtion)
if filter_condtions.get(condtion) == str(choice_item[0]):
selected = "selected"
select_ele += '''<option value='%s' %s>%s</option>''' % (choice_item[0], selected, choice_item[1])
selected = ''
select_ele += "</select>"
return mark_safe(select_ele)
@register.simple_tag
def change_order(column):
if column.startswith("-"):
column = column.strip("-")
else:
column = "-%s" % column
return column
@register.simple_tag
def get_all_m2m_list(obj_all_model_and_display, field, form_obj):
"""
:param obj_all_model_and_display:
:param field:
:param form_obj:
:return: 返还m2m所有待选数据
"""
# models.Customer.tags.rel.to.objects.all()
# obj_all_model_and_display.model=models.Customer
# print obj_all_model_and_display.model
if hasattr(obj_all_model_and_display.model, field.name):
field_all_obj = getattr(obj_all_model_and_display.model, field.name).rel.to.objects.all()
# print field_all_obj
# 相当于field_obj =models.Customer.tags.
# 类似 getattr(d,'tags').rel.to.objects.all()
# print field_all_obj.intersection(field_select_obj)
# "返还全部的减去待选的"
if hasattr(form_obj.instance, field.name):
field_select_obj = getattr(form_obj.instance, field.name).all()
return field_all_obj.difference(field_select_obj)
else:
return field_all_obj
# return (field_select_obj|field_all_obj).distinct()
@register.simple_tag
def print_obj_(obj):
return obj.instance
@register.simple_tag
def get_select_m2m_list(form_obj, field):
"""
:param form_obj:
:param field:
:return: {{ form_obj.instance.tags.all }}
form_obj= new_model_form(instance=table_obj)
返还已选择的
"""
if hasattr(form_obj.instance, field.name):
field_select_obj = getattr(form_obj.instance, field.name)
return field_select_obj.all()
else:
return ""
def recursive_related_objs_lookup(objs):
print "objs", objs
# model_name = objs[0]._meta.model_name
ul_ele = "<ul>"
for obj in objs:
li_ele = '''<li> %s: %s </li>''' % (obj._meta.verbose_name, obj.__unicode__().strip("<>"))
ul_ele += li_ele
# for local many to many
# print("------- obj._meta.local_many_to_many", obj._meta.local_many_to_many)
for m2m_field in obj._meta.local_many_to_many: # 把所有跟这个对象直接关联的m2m字段取出来了
sub_ul_ele = "<ul>"
m2m_field_obj = getattr(obj, m2m_field.name) # getattr(customer, 'tags')
for o in m2m_field_obj.select_related(): # customer.tags.select_related()
li_ele = '''<li> %s: %s </li>''' % (m2m_field.verbose_name, o.__unicode__().strip("<>"))
sub_ul_ele += li_ele
sub_ul_ele += "</ul>"
ul_ele += sub_ul_ele # 最终跟最外层的ul相拼接
for related_obj in obj._meta.related_objects:
if 'ManyToManyRel' in related_obj.__repr__():
if hasattr(obj, related_obj.get_accessor_name()): # hassattr(customer,'enrollment_set')
accessor_obj = getattr(obj, related_obj.get_accessor_name())
print("-------ManyToManyRel", accessor_obj, related_obj.get_accessor_name())
# 上面accessor_obj 相当于 customer.enrollment_set
if hasattr(accessor_obj, 'select_related'): # slect_related() == all()
target_objs = accessor_obj.select_related() # .filter(**filter_coditions)
# target_objs 相当于 customer.enrollment_set.all()
sub_ul_ele = "<ul style='color:red'>"
for o in target_objs:
li_ele = '''<li> %s: %s </li>''' % (o._meta.verbose_name, o.__unicode__().strip("<>"))
sub_ul_ele += li_ele
sub_ul_ele += "</ul>"
ul_ele += sub_ul_ele
elif hasattr(obj, related_obj.get_accessor_name()): # hassattr(customer,'enrollment_set')
accessor_obj = getattr(obj, related_obj.get_accessor_name())
# 上面accessor_obj 相当于 customer.enrollment_set
if hasattr(accessor_obj, 'select_related'): # slect_related() == all()
target_objs = accessor_obj.select_related() # .filter(**filter_coditions)
# target_objs 相当于 customer.enrollment_set.all()
else:
print("one to one i guess:", accessor_obj)
target_objs = accessor_obj
if len(target_objs) > 0:
# print("\033[31;1mdeeper layer lookup -------\033[0m")
# nodes = recursive_related_objs_lookup(target_objs,model_name)
nodes = recursive_related_objs_lookup(target_objs)
ul_ele += nodes
ul_ele += "</ul>"
return ul_ele
@register.simple_tag
def display_obj_related(objs):
'''把对象及所有相关联的数据取出来'''
# objs = [objs] # fake
# if objs:
# model_class = objs[0]._meta.model # <class 'crm.models.Customer'>
# mode_name = objs[0]._meta.model_name # customer
return mark_safe(recursive_related_objs_lookup(objs))
@register.simple_tag
def display_no_exist(one_obj_django, filed,table_name):
return mark_safe('''<a href="%s/%s/%s">点击报名</a>''' % (str(table_name),one_obj_django.id, filed))
@register.simple_tag
def get_filed_chinese_name(column, obj_all_model_and_display):
"""
models.Customer._meta.get_field('tags').verbose_name
:param column:
:param obj_all_model_and_display:
:return:
"""
# print obj_all_model_and_display.model._meta.get_field('tags').verbose_name
chinese_chinses_obj = obj_all_model_and_display.model._meta.get_field(column)
return chinese_chinses_obj.verbose_name
@register.simple_tag
def get_types(stra):
print stra.dispaly_name
return type(stra)
@register.simple_tag
def get_action_verbose_name(admin_class,action):
if hasattr(admin_class,action):
action_func = getattr(admin_class,action)
return action_func.display_name if hasattr(action_func,'display_name') else action | [
"[email protected]"
] | |
d06ab34fea0bac11e8aa864a35184490730e2a5a | 02b495111594a367405b2bfbf220e38da3a5f7b0 | /devel/lib/python2.7/dist-packages/brics_actuator/msg/_JointValue.py | 0723b3357a381bbe1f9fbd1dbb79f58932d32bef | [
"BSD-2-Clause"
] | permissive | Ashuditya/Rebellious-Cowards | 3f7c6afd314e4bf2ffb72b99ecf58be23f309e97 | 56ec395147f2fc59a26669a74a04fe02227bc7b7 | refs/heads/master | 2023-01-24T10:57:47.533839 | 2020-10-01T15:58:07 | 2020-10-01T15:58:07 | 218,202,193 | 0 | 3 | BSD-2-Clause | 2020-10-01T17:07:44 | 2019-10-29T04:09:46 | Makefile | UTF-8 | Python | false | false | 6,583 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from brics_actuator/JointValue.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import genpy
class JointValue(genpy.Message):
_md5sum = "c8dad5a006889ad7de711a684999f0c6"
_type = "brics_actuator/JointValue"
_has_header = False #flag to mark the presence of a Header object
_full_text = """time timeStamp #time of the data
string joint_uri
string unit #if empy expects si units, you can use boost::unit
float64 value
"""
__slots__ = ['timeStamp','joint_uri','unit','value']
_slot_types = ['time','string','string','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
timeStamp,joint_uri,unit,value
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(JointValue, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.timeStamp is None:
self.timeStamp = genpy.Time()
if self.joint_uri is None:
self.joint_uri = ''
if self.unit is None:
self.unit = ''
if self.value is None:
self.value = 0.
else:
self.timeStamp = genpy.Time()
self.joint_uri = ''
self.unit = ''
self.value = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2I().pack(_x.timeStamp.secs, _x.timeStamp.nsecs))
_x = self.joint_uri
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.unit
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_d().pack(self.value))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.timeStamp is None:
self.timeStamp = genpy.Time()
end = 0
_x = self
start = end
end += 8
(_x.timeStamp.secs, _x.timeStamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_uri = str[start:end].decode('utf-8')
else:
self.joint_uri = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.unit = str[start:end].decode('utf-8')
else:
self.unit = str[start:end]
start = end
end += 8
(self.value,) = _get_struct_d().unpack(str[start:end])
self.timeStamp.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2I().pack(_x.timeStamp.secs, _x.timeStamp.nsecs))
_x = self.joint_uri
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.unit
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_d().pack(self.value))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.timeStamp is None:
self.timeStamp = genpy.Time()
end = 0
_x = self
start = end
end += 8
(_x.timeStamp.secs, _x.timeStamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_uri = str[start:end].decode('utf-8')
else:
self.joint_uri = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.unit = str[start:end].decode('utf-8')
else:
self.unit = str[start:end]
start = end
end += 8
(self.value,) = _get_struct_d().unpack(str[start:end])
self.timeStamp.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_d = None
def _get_struct_d():
global _struct_d
if _struct_d is None:
_struct_d = struct.Struct("<d")
return _struct_d
| [
"[email protected]"
] | |
f32e61acab543b074d8350bb2c926e937628cbb7 | 97f285b6f8016a8d1d2d675fffb771df3c9e37b9 | /study/algorithms/sorting/selection_sort.py | b1177b6f5b9e1b1dd7feb0d3974b2999b7447124 | [] | no_license | oskomorokhov/python | ef5408499840465d18852954aee9de460d0e7250 | 8909396c4200bd2fca19d3f216ed5f484fb2192a | refs/heads/master | 2021-05-14T09:27:25.413163 | 2019-12-12T21:00:05 | 2019-12-12T21:00:05 | 116,327,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | # selection sort
def ssort(lst):
""" The algorithm divides the input list into two parts: the sublist of items already sorted, which is built up from left to right at the front (left) of the list,
and the sublist of items remaining to be sorted that occupy the rest of the list. Initially, the sorted sublist is empty and the unsorted sublist is the entire input list.
The algorithm proceeds by finding the smallest (or largest, depending on sorting order) element in the unsorted sublist, exchanging (swapping) it with the leftmost unsorted element (putting it in sorted order),
and moving the sublist boundaries one element to the right.
"""
pivot = 0
while pivot < len(lst):
current_min = lst[pivot]
new_min = None
for num in lst[pivot+1:]:
if num < current_min:
current_min = new_min = num
if new_min:
lst[lst.index(new_min)
], lst[pivot] = lst[pivot], lst[lst.index(new_min)]
pivot += 1
return lst
if __name__ == '__main__':
print("original list", [3, 44, 38, 5, 47,
15, 36, 26, 27, 2, 46, 4, 19, 50, 48])
print(ssort([3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48]))
| [
"[email protected]"
] | |
8ca1e09fb7ee173a14faeb5049dd0aa0737a9ba0 | eff2fc11905f6118dcd70050392f168cd7aea086 | /leetcode/40_combination_sum_ii/solution1.py | df0fa9abba6a73cfa6548fd39c14982c906e75fb | [] | no_license | algobot76/leetcode-python | 28f1e1107fa941a3b40006f074eec6231e674ac1 | ec8bff8978d6915bfdf187c760b97ee70f7515af | refs/heads/master | 2021-07-05T17:06:40.581977 | 2020-09-19T22:02:38 | 2020-09-19T22:02:38 | 199,255,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | class Solution:
def combinationSum2(self, candidates, target):
candidates.sort()
combs = []
self.dfs(candidates, target, 0, [], combs)
return combs
def dfs(self, candidates, target, start, comb, combs):
if target < 0:
return
if target == 0:
return combs.append(list(comb))
prev = 0
while start < len(candidates) and candidates[start] <= target:
if prev != candidates[start]:
comb.append(candidates[start])
self.dfs(candidates, target - candidates[start], start + 1,
comb, combs)
comb.pop()
prev = candidates[start]
start += 1
| [
"[email protected]"
] | |
0703e5f22212b00ffaf7e02dd00eeaa7b1966ce3 | cc578cec7c485e2c1060fd075ccc08eb18124345 | /cs15211/TopKFrequentWords.py | 7733837228f8d83367a4b89021aa264f1154d5e3 | [
"Apache-2.0"
] | permissive | JulyKikuAkita/PythonPrac | 18e36bfad934a6112f727b4906a5e4b784182354 | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | refs/heads/master | 2021-01-21T16:49:01.482561 | 2019-02-07T06:15:29 | 2019-02-07T06:15:29 | 91,907,704 | 1 | 1 | Apache-2.0 | 2019-02-07T06:15:30 | 2017-05-20T18:12:53 | Python | UTF-8 | Python | false | false | 5,923 | py | __source__ = 'https://leetcode.com/problems/top-k-frequent-words/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 692. Top K Frequent Words
#
# Given a non-empty list of words, return the k most frequent elements.
#
# Your answer should be sorted by frequency from highest to lowest.
# If two words have the same frequency, then the word with the lower alphabetical order comes first.
#
# Example 1:
# Input: ["i", "love", "leetcode", "i", "love", "coding"], k = 2
# Output: ["i", "love"]
# Explanation: "i" and "love" are the two most frequent words.
# Note that "i" comes before "love" due to a lower alphabetical order.
# Example 2:
# Input: ["the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"], k = 4
# Output: ["the", "is", "sunny", "day"]
# Explanation: "the", "is", "sunny" and "day" are the four most frequent words,
# with the number of occurrence being 4, 3, 2 and 1 respectively.
# Note:
# You may assume k is always valid, 1 <= k <= number of unique elements.
# Input words contain only lowercase letters.
# Follow up:
# Try to solve it in O(n log k) time and O(n) extra space.
#
import heapq
import unittest
import collections
#
# Approach #1: Sorting [Accepted]
# Time Complexity: O(NlogN), where N is the length of words.
# We count the frequency of each word in O(N) time, then we sort the given words in O(NlogN) time.
#
# Space Complexity: O(N), the space used to store our candidates.
class Solution(object):
def topKFrequent(self, words, k):
"""
:type words: List[str]
:type k: int
:rtype: List[str]
"""
count = collections.Counter(words)
candidates = count.keys()
candidates.sort(key = lambda w: (-count[w], w))
return candidates[:k]
# In Python, we improve this to O(N+klogN): our heapq.heapify operation and counting operations are O(N),
# and each of kk heapq.heappop operations are O(logN).
# Space Complexity: O(N)O(N), the space used to store our count.
class Solution2(object):
def topKFrequent(self, words, k):
"""
:type words: List[str]
:type k: int
:rtype: List[str]
"""
count = collections.Counter(words)
heap = [(-freq, word) for word, freq in count.items()]
heapq.heapify(heap)
return [heapq.heappop(heap)[1] for _ in xrange(k)]
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/top-k-frequent-words/solution/
# Approach #1: Sorting [Accepted]
# 68ms 11.37%
class Solution {
public List<String> topKFrequent(String[] words, int k) {
Map<String, Integer> count = new HashMap<>();
for (String word: words) {
count.put(word, count.getOrDefault(word, 0) + 1);
}
List<String> candidates = new ArrayList(count.keySet());
Collections.sort(candidates, (w1, w2) -> count.get(w1).equals(count.get(w2))?
w1.compareTo(w2) : count.get(w2) - count.get(w1)); //if w1 - w2,
// sorting in increasing order, thus return least frequent words
return candidates.subList(0, k);
}
}
# Approach #2: Heap [Accepted] PQ
# 11ms 99.80%
# Time Complexity: O(Nlogk), where N is the length of words.
# We count the frequency of each word in O(N) time, then we add N words to the heap,
# each in O(logk) time. Finally, we pop from the heap up to k times. As k <= N, this is O(Nlogk) in total.
/*
Lambda expression
https://www.mkyong.com/java8/java-8-lambda-comparator-example/
*/
# 13ms 81.92%
class Solution {
public List<String> topKFrequent(String[] words, int k) {
List<String> res = new ArrayList<>();
Map<String, Integer> map = new HashMap<>();
for (String word: words) {
map.put(word, map.getOrDefault(word, 0) + 1);
}
PriorityQueue<Map.Entry<String, Integer>> pq = new PriorityQueue<>(new Checker());
for (Map.Entry<String, Integer> entry : map.entrySet()) {
pq.offer(entry);
if (pq.size() > k) pq.poll();
}
while (pq.size() != 0) {
res.add(0, pq.poll().getKey());
}
return res;
}
}
class Checker implements Comparator<Map.Entry<String, Integer>> {
public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
if (o1.getValue() == o2.getValue()) {
return o2.getKey().compareTo(o1.getKey());
} else {
return o1.getValue() - o2.getValue();
}
}
}
# 10ms 99.34%
class Solution {
private class Point implements Comparable<Point> {
private String str;
private int count;
public Point(String str) {
this.str = str;
this.count = 1;
}
@Override
public int hashCode() {
return str.hashCode();
}
@Override
public int compareTo(Point b) {
if(count != b.count) {
return b.count - count;
}
else {
return str.compareTo(b.str);
}
}
public void addCount() {
count++;
}
public String getStr() {
return str;
}
}
public List<String> topKFrequent(String[] words, int k) {
Map<String, Point> map = new HashMap<>();
for(String word: words) {
if(map.containsKey(word)) {
map.get(word).addCount();
}
else map.put(word, new Point(word));
}
PriorityQueue<Point> pq = new PriorityQueue<>(map.values());
int count = 0;
List<String> res = new ArrayList<>();
while(!pq.isEmpty() && count < k) {
res.add(pq.poll().getStr());
count++;
}
return res;
}
}
''' | [
"[email protected]"
] | |
01651216a026d86c1a68fac21316efefe8e285b4 | 6b05bddf2e294c8e1b39846aecadfa06b4ff805d | /kubevirt/models/v1_secret_volume_source.py | a4149f175fdbc18ed8d07833b30451edf27ea370 | [
"Apache-2.0"
] | permissive | kubevirt/client-python | 5ca82fe55d48c07f62796d2bed3605a7c189922c | 235fe17f58d41165010be7e4122cb67bdc866fe7 | refs/heads/master | 2023-09-03T12:25:27.272479 | 2023-08-17T00:33:31 | 2023-08-17T00:33:31 | 105,017,761 | 29 | 25 | Apache-2.0 | 2022-10-20T13:52:10 | 2017-09-27T12:51:32 | Python | UTF-8 | Python | false | false | 5,318 | py | # coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1SecretVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'optional': 'bool',
'secret_name': 'str',
'volume_label': 'str'
}
attribute_map = {
'optional': 'optional',
'secret_name': 'secretName',
'volume_label': 'volumeLabel'
}
def __init__(self, optional=None, secret_name=None, volume_label=None):
"""
V1SecretVolumeSource - a model defined in Swagger
"""
self._optional = None
self._secret_name = None
self._volume_label = None
if optional is not None:
self.optional = optional
if secret_name is not None:
self.secret_name = secret_name
if volume_label is not None:
self.volume_label = volume_label
@property
def optional(self):
"""
Gets the optional of this V1SecretVolumeSource.
Specify whether the Secret or it's keys must be defined
:return: The optional of this V1SecretVolumeSource.
:rtype: bool
"""
return self._optional
@optional.setter
def optional(self, optional):
"""
Sets the optional of this V1SecretVolumeSource.
Specify whether the Secret or it's keys must be defined
:param optional: The optional of this V1SecretVolumeSource.
:type: bool
"""
self._optional = optional
@property
def secret_name(self):
"""
Gets the secret_name of this V1SecretVolumeSource.
Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
:return: The secret_name of this V1SecretVolumeSource.
:rtype: str
"""
return self._secret_name
@secret_name.setter
def secret_name(self, secret_name):
"""
Sets the secret_name of this V1SecretVolumeSource.
Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
:param secret_name: The secret_name of this V1SecretVolumeSource.
:type: str
"""
self._secret_name = secret_name
@property
def volume_label(self):
"""
Gets the volume_label of this V1SecretVolumeSource.
The volume label of the resulting disk inside the VMI. Different bootstrapping mechanisms require different values. Typical values are \"cidata\" (cloud-init), \"config-2\" (cloud-init) or \"OEMDRV\" (kickstart).
:return: The volume_label of this V1SecretVolumeSource.
:rtype: str
"""
return self._volume_label
@volume_label.setter
def volume_label(self, volume_label):
"""
Sets the volume_label of this V1SecretVolumeSource.
The volume label of the resulting disk inside the VMI. Different bootstrapping mechanisms require different values. Typical values are \"cidata\" (cloud-init), \"config-2\" (cloud-init) or \"OEMDRV\" (kickstart).
:param volume_label: The volume_label of this V1SecretVolumeSource.
:type: str
"""
self._volume_label = volume_label
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1SecretVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
08777ef56a0df912e73d6c15c9f138bd8b2e87c3 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_recognize_pii_entities_async.py | 7c580718d21294e4c46f62a5a71fbf2a0867ba92 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 4,031 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_pii_entities_async.py
DESCRIPTION:
This sample demonstrates how to recognize personally identifiable information in a batch of documents.
The endpoint recognize_pii_entities is only available for API version v3.1-preview and up.
In this sample, we will be working for a company that handles loan payments. To follow privacy guidelines,
we need to redact all of our information before we make it public.
USAGE:
python sample_recognize_pii_entities_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_TEXT_ANALYTICS_KEY - your Text Analytics subscription key
"""
import os
import asyncio
class RecognizePiiEntitiesSampleAsync(object):
async def recognize_pii_entities_async(self):
print(
"In this sample we will be going through our customer's loan payment information and redacting "
"all PII (personally identifable information) before storing this information on our public website. "
"I'm also looking to explicitly extract the SSN information, so I can update my database with SSNs for "
"our customers"
)
# [START recognize_pii_entities_async]
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics.aio import TextAnalyticsClient
endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]
text_analytics_client = TextAnalyticsClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
documents = [
"""Parker Doe has repaid all of their loans as of 2020-04-25.
Their SSN is 859-98-0987. To contact them, use their phone number
555-555-5555. They are originally from Brazil and have Brazilian CPF number 998.214.865-68"""
]
async with text_analytics_client:
result = await text_analytics_client.recognize_pii_entities(documents)
docs = [doc for doc in result if not doc.is_error]
print(
"Let's compare the original document with the documents after redaction. "
"I also want to comb through all of the entities that got redacted"
)
for idx, doc in enumerate(docs):
print("Document text: {}".format(documents[idx]))
print("Redacted document text: {}".format(doc.redacted_text))
for entity in doc.entities:
print("...Entity '{}' with category '{}' got redacted".format(
entity.text, entity.category
))
# [END recognize_pii_entities_async]
print("All of the information that I expect to be redacted is!")
print(
"Now I want to explicitly extract SSN information to add to my user SSN database. "
"I also want to be fairly confident that what I'm storing is an SSN, so let's also "
"ensure that we're > 60% positive the entity is a SSN"
)
ssns = []
for doc in docs:
for entity in doc.entities:
if entity.category == 'U.S. Social Security Number (SSN)' and entity.confidence_score >= 0.6:
ssns.append(entity.text)
print("We have extracted the following SSNs as well: '{}'".format(
"', '".join(ssns)
))
async def main():
sample = RecognizePiiEntitiesSampleAsync()
await sample.recognize_pii_entities_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| [
"[email protected]"
] | |
414558f8f2f2f959546e50c46144100f193f178d | 6d429c1bc185fc8180fc69f1d49fd781e9a90748 | /appuser/codemanager.py | 98381f12400d2cfb23c1cb65a163547d03f84290 | [] | no_license | FirayMa/store | 6bc5d350da4170d0ef87d25748635cd1a32aa717 | 542a955451f78f9f904010383b1c661e2fbef471 | refs/heads/master | 2023-05-28T05:33:13.867339 | 2017-09-07T01:00:30 | 2017-09-07T01:00:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | from django.db import models
import pdb
import random
import string
from django.conf import settings
from common.e_mail import EmailEx
class CodeManager(models.Manager):
"""
验证码的manager
"""
email = EmailEx()
def send_code(self, email):
result={}
if not self.email.EMAIL_REGEX.match(email):
result['status'] = 1
result['msg'] = '电子邮件格式不正确'
else:
code = ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(4))
Subject = settings.PROJECTNAME+'注册邮箱验证'
content = '您好, 欢迎您注册, 欢迎加入我们, 您的邮箱验证码是: ' + code
try:
self.email.send_text_email(Subject, content, email)
try:
verify_code = self.model.objects.get(email__exact = email, type ='0')
verify_code.code = code
verify_code.save()
except self.model.DoesNotExist:
verify_code = self.model(email=email, code=code, type ='0')
verify_code.save()
result['status'] = 2
result['msg'] = '验证码已发至您的邮箱中, 请到邮箱中查看您的验证码!'
except Exception as e:
result['status'] = 3
result['msg'] = '发送邮件的过程中发生错误: '+ str(e)
return result
def veirfy_code(self, code, email):
try:
verify_code = self.model.objects.get(email__exact = email, code =code)
return True
except self.model.DoesNotExist:
return False
| [
"[email protected]"
] | |
90103b4dfe92fcefbca7e03b61049dfd4b387ab2 | cc0c0f99a5cf563ff52a76f2ac17cdad09d22f01 | /venv/Lib/site-packages/itk/itkBinaryMask3DMeshSourcePython.py | 9b1e3354b60a4ae82b8bc30de79fa59d8b65a3ec | [] | no_license | Marxss/carck_detect_system | 9c0d338bde322b4c7304fd0addb524d8697c8a7b | d2480f2108052af8af0aa5265a5239c309885043 | refs/heads/master | 2022-04-15T23:34:20.988335 | 2020-03-29T16:24:00 | 2020-03-29T16:24:00 | 214,625,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96,779 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3, 0, 0):
new_instancemethod = lambda func, inst, cls: _itkBinaryMask3DMeshSourcePython.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_itkBinaryMask3DMeshSourcePython', [dirname(__file__)])
except ImportError:
import _itkBinaryMask3DMeshSourcePython
return _itkBinaryMask3DMeshSourcePython
if fp is not None:
try:
_mod = imp.load_module('_itkBinaryMask3DMeshSourcePython', fp, pathname, description)
finally:
fp.close()
return _mod
_itkBinaryMask3DMeshSourcePython = swig_import_helper()
del swig_import_helper
else:
import _itkBinaryMask3DMeshSourcePython
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import itkImageToMeshFilterPython
import itkMeshBasePython
import itkBoundingBoxPython
import itkMapContainerPython
import ITKCommonBasePython
import pyBasePython
import itkVectorPython
import vnl_vectorPython
import vnl_matrixPython
import stdcomplexPython
import vnl_vector_refPython
import itkFixedArrayPython
import itkPointPython
import itkVectorContainerPython
import itkOffsetPython
import itkSizePython
import itkContinuousIndexPython
import itkIndexPython
import itkMatrixPython
import vnl_matrix_fixedPython
import itkCovariantVectorPython
import itkPointSetPython
import itkArrayPython
import itkImagePython
import itkSymmetricSecondRankTensorPython
import itkImageRegionPython
import itkRGBPixelPython
import itkRGBAPixelPython
import itkMeshSourcePython
def itkBinaryMask3DMeshSourceIUS3MD3_New():
return itkBinaryMask3DMeshSourceIUS3MD3.New()
def itkBinaryMask3DMeshSourceIUS3MF3_New():
return itkBinaryMask3DMeshSourceIUS3MF3.New()
def itkBinaryMask3DMeshSourceIUS3MUS3_New():
return itkBinaryMask3DMeshSourceIUS3MUS3.New()
def itkBinaryMask3DMeshSourceIUS3MUC3_New():
return itkBinaryMask3DMeshSourceIUS3MUC3.New()
def itkBinaryMask3DMeshSourceIUS3MSS3_New():
return itkBinaryMask3DMeshSourceIUS3MSS3.New()
def itkBinaryMask3DMeshSourceIUC3MD3_New():
return itkBinaryMask3DMeshSourceIUC3MD3.New()
def itkBinaryMask3DMeshSourceIUC3MF3_New():
return itkBinaryMask3DMeshSourceIUC3MF3.New()
def itkBinaryMask3DMeshSourceIUC3MUS3_New():
return itkBinaryMask3DMeshSourceIUC3MUS3.New()
def itkBinaryMask3DMeshSourceIUC3MUC3_New():
return itkBinaryMask3DMeshSourceIUC3MUC3.New()
def itkBinaryMask3DMeshSourceIUC3MSS3_New():
return itkBinaryMask3DMeshSourceIUC3MSS3.New()
def itkBinaryMask3DMeshSourceISS3MD3_New():
return itkBinaryMask3DMeshSourceISS3MD3.New()
def itkBinaryMask3DMeshSourceISS3MF3_New():
return itkBinaryMask3DMeshSourceISS3MF3.New()
def itkBinaryMask3DMeshSourceISS3MUS3_New():
return itkBinaryMask3DMeshSourceISS3MUS3.New()
def itkBinaryMask3DMeshSourceISS3MUC3_New():
return itkBinaryMask3DMeshSourceISS3MUC3.New()
def itkBinaryMask3DMeshSourceISS3MSS3_New():
return itkBinaryMask3DMeshSourceISS3MSS3.New()
class itkBinaryMask3DMeshSourceISS3MD3(itkImageToMeshFilterPython.itkImageToMeshFilterISS3MD3):
"""Proxy of C++ itkBinaryMask3DMeshSourceISS3MD3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkBinaryMask3DMeshSourceISS3MD3_Pointer":
"""__New_orig__() -> itkBinaryMask3DMeshSourceISS3MD3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkBinaryMask3DMeshSourceISS3MD3_Pointer":
"""Clone(itkBinaryMask3DMeshSourceISS3MD3 self) -> itkBinaryMask3DMeshSourceISS3MD3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_Clone(self)
def SetObjectValue(self, _arg: 'short const') -> "void":
"""SetObjectValue(itkBinaryMask3DMeshSourceISS3MD3 self, short const _arg)"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_SetObjectValue(self, _arg)
def GetNumberOfNodes(self) -> "unsigned long long":
"""GetNumberOfNodes(itkBinaryMask3DMeshSourceISS3MD3 self) -> unsigned long long"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_GetNumberOfNodes(self)
def GetNumberOfCells(self) -> "unsigned long long":
"""GetNumberOfCells(itkBinaryMask3DMeshSourceISS3MD3 self) -> unsigned long long"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_GetNumberOfCells(self)
def SetInput(self, inputImage: 'itkImageSS3') -> "void":
"""SetInput(itkBinaryMask3DMeshSourceISS3MD3 self, itkImageSS3 inputImage)"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_SetInput(self, inputImage)
def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
"""SetRegionOfInterest(itkBinaryMask3DMeshSourceISS3MD3 self, itkImageRegion3 iRegion)"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_SetRegionOfInterest(self, iRegion)
def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
"""GetRegionOfInterest(itkBinaryMask3DMeshSourceISS3MD3 self) -> itkImageRegion3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_GetRegionOfInterest(self)
__swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceISS3MD3
def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceISS3MD3 *":
"""cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceISS3MD3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkBinaryMask3DMeshSourceISS3MD3
Create a new object of the class itkBinaryMask3DMeshSourceISS3MD3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkBinaryMask3DMeshSourceISS3MD3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkBinaryMask3DMeshSourceISS3MD3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkBinaryMask3DMeshSourceISS3MD3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkBinaryMask3DMeshSourceISS3MD3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_Clone, None, itkBinaryMask3DMeshSourceISS3MD3)
itkBinaryMask3DMeshSourceISS3MD3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_SetObjectValue, None, itkBinaryMask3DMeshSourceISS3MD3)
itkBinaryMask3DMeshSourceISS3MD3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceISS3MD3)
itkBinaryMask3DMeshSourceISS3MD3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceISS3MD3)
itkBinaryMask3DMeshSourceISS3MD3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_SetInput, None, itkBinaryMask3DMeshSourceISS3MD3)
itkBinaryMask3DMeshSourceISS3MD3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceISS3MD3)
itkBinaryMask3DMeshSourceISS3MD3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceISS3MD3)
itkBinaryMask3DMeshSourceISS3MD3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_swigregister
itkBinaryMask3DMeshSourceISS3MD3_swigregister(itkBinaryMask3DMeshSourceISS3MD3)
def itkBinaryMask3DMeshSourceISS3MD3___New_orig__() -> "itkBinaryMask3DMeshSourceISS3MD3_Pointer":
"""itkBinaryMask3DMeshSourceISS3MD3___New_orig__() -> itkBinaryMask3DMeshSourceISS3MD3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3___New_orig__()
def itkBinaryMask3DMeshSourceISS3MD3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceISS3MD3 *":
"""itkBinaryMask3DMeshSourceISS3MD3_cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceISS3MD3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MD3_cast(obj)
class itkBinaryMask3DMeshSourceISS3MF3(itkImageToMeshFilterPython.itkImageToMeshFilterISS3MF3):
"""Proxy of C++ itkBinaryMask3DMeshSourceISS3MF3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkBinaryMask3DMeshSourceISS3MF3_Pointer":
"""__New_orig__() -> itkBinaryMask3DMeshSourceISS3MF3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkBinaryMask3DMeshSourceISS3MF3_Pointer":
"""Clone(itkBinaryMask3DMeshSourceISS3MF3 self) -> itkBinaryMask3DMeshSourceISS3MF3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_Clone(self)
def SetObjectValue(self, _arg: 'short const') -> "void":
"""SetObjectValue(itkBinaryMask3DMeshSourceISS3MF3 self, short const _arg)"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_SetObjectValue(self, _arg)
def GetNumberOfNodes(self) -> "unsigned long long":
"""GetNumberOfNodes(itkBinaryMask3DMeshSourceISS3MF3 self) -> unsigned long long"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_GetNumberOfNodes(self)
def GetNumberOfCells(self) -> "unsigned long long":
"""GetNumberOfCells(itkBinaryMask3DMeshSourceISS3MF3 self) -> unsigned long long"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_GetNumberOfCells(self)
def SetInput(self, inputImage: 'itkImageSS3') -> "void":
"""SetInput(itkBinaryMask3DMeshSourceISS3MF3 self, itkImageSS3 inputImage)"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_SetInput(self, inputImage)
def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
"""SetRegionOfInterest(itkBinaryMask3DMeshSourceISS3MF3 self, itkImageRegion3 iRegion)"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_SetRegionOfInterest(self, iRegion)
def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
"""GetRegionOfInterest(itkBinaryMask3DMeshSourceISS3MF3 self) -> itkImageRegion3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_GetRegionOfInterest(self)
__swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceISS3MF3
def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceISS3MF3 *":
"""cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceISS3MF3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkBinaryMask3DMeshSourceISS3MF3
Create a new object of the class itkBinaryMask3DMeshSourceISS3MF3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkBinaryMask3DMeshSourceISS3MF3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkBinaryMask3DMeshSourceISS3MF3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkBinaryMask3DMeshSourceISS3MF3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkBinaryMask3DMeshSourceISS3MF3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_Clone, None, itkBinaryMask3DMeshSourceISS3MF3)
itkBinaryMask3DMeshSourceISS3MF3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_SetObjectValue, None, itkBinaryMask3DMeshSourceISS3MF3)
itkBinaryMask3DMeshSourceISS3MF3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceISS3MF3)
itkBinaryMask3DMeshSourceISS3MF3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceISS3MF3)
itkBinaryMask3DMeshSourceISS3MF3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_SetInput, None, itkBinaryMask3DMeshSourceISS3MF3)
itkBinaryMask3DMeshSourceISS3MF3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceISS3MF3)
itkBinaryMask3DMeshSourceISS3MF3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceISS3MF3)
itkBinaryMask3DMeshSourceISS3MF3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_swigregister
itkBinaryMask3DMeshSourceISS3MF3_swigregister(itkBinaryMask3DMeshSourceISS3MF3)
def itkBinaryMask3DMeshSourceISS3MF3___New_orig__() -> "itkBinaryMask3DMeshSourceISS3MF3_Pointer":
"""itkBinaryMask3DMeshSourceISS3MF3___New_orig__() -> itkBinaryMask3DMeshSourceISS3MF3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3___New_orig__()
def itkBinaryMask3DMeshSourceISS3MF3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceISS3MF3 *":
"""itkBinaryMask3DMeshSourceISS3MF3_cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceISS3MF3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MF3_cast(obj)
# SWIG-generated proxy for itk::BinaryMask3DMeshSource, template suffix
# "ISS3MSS3" (input image type itkImageSS3 per SetInput; object value type
# `short` per SetObjectValue; mesh template suffix MSS3).  Generated code:
# hand edits are lost when the wrapper is regenerated.
class itkBinaryMask3DMeshSourceISS3MSS3(itkImageToMeshFilterPython.itkImageToMeshFilterISS3MSS3):
    """Proxy of C++ itkBinaryMask3DMeshSourceISS3MSS3 class."""

    # Ownership flag for the underlying C++ object, managed by the SWIG runtime.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances are created through New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceISS3MSS3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceISS3MSS3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3___New_orig__()

    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceISS3MSS3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceISS3MSS3 self) -> itkBinaryMask3DMeshSourceISS3MSS3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_Clone(self)

    def SetObjectValue(self, _arg: 'short const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceISS3MSS3 self, short const _arg)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceISS3MSS3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceISS3MSS3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageSS3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceISS3MSS3 self, itkImageSS3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceISS3MSS3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceISS3MSS3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_GetRegionOfInterest(self)

    # Destructor hook invoked by the SWIG runtime when thisown is set.
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceISS3MSS3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceISS3MSS3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceISS3MSS3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_cast(obj)

    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceISS3MSS3

        Create a new object of the class itkBinaryMask3DMeshSourceISS3MSS3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkBinaryMask3DMeshSourceISS3MSS3.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkBinaryMask3DMeshSourceISS3MSS3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkBinaryMask3DMeshSourceISS3MSS3.__New_orig__()
        # itkTemplate.New assigns positional args to inputs and keyword args to Set* methods.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj

    New = staticmethod(New)
# Bind the itkBinaryMask3DMeshSourceISS3MSS3 proxy methods directly to the C
# extension entry points, then register the proxy class with the SWIG runtime
# type system (SWIG-generated boilerplate).
itkBinaryMask3DMeshSourceISS3MSS3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_Clone, None, itkBinaryMask3DMeshSourceISS3MSS3)
itkBinaryMask3DMeshSourceISS3MSS3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_SetObjectValue, None, itkBinaryMask3DMeshSourceISS3MSS3)
itkBinaryMask3DMeshSourceISS3MSS3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceISS3MSS3)
itkBinaryMask3DMeshSourceISS3MSS3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceISS3MSS3)
itkBinaryMask3DMeshSourceISS3MSS3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_SetInput, None, itkBinaryMask3DMeshSourceISS3MSS3)
itkBinaryMask3DMeshSourceISS3MSS3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceISS3MSS3)
itkBinaryMask3DMeshSourceISS3MSS3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceISS3MSS3)
itkBinaryMask3DMeshSourceISS3MSS3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_swigregister
itkBinaryMask3DMeshSourceISS3MSS3_swigregister(itkBinaryMask3DMeshSourceISS3MSS3)
def itkBinaryMask3DMeshSourceISS3MSS3___New_orig__() -> "itkBinaryMask3DMeshSourceISS3MSS3_Pointer":
    """Create a new itkBinaryMask3DMeshSourceISS3MSS3 via the wrapped C++ factory."""
    factory = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3___New_orig__
    return factory()
def itkBinaryMask3DMeshSourceISS3MSS3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceISS3MSS3 *":
    """Down-cast *obj* to itkBinaryMask3DMeshSourceISS3MSS3 through the C extension."""
    caster = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MSS3_cast
    return caster(obj)
# SWIG-generated proxy for itk::BinaryMask3DMeshSource, template suffix
# "ISS3MUC3" (input image type itkImageSS3 per SetInput; object value type
# `short` per SetObjectValue; mesh template suffix MUC3).  Generated code:
# hand edits are lost when the wrapper is regenerated.
class itkBinaryMask3DMeshSourceISS3MUC3(itkImageToMeshFilterPython.itkImageToMeshFilterISS3MUC3):
    """Proxy of C++ itkBinaryMask3DMeshSourceISS3MUC3 class."""

    # Ownership flag for the underlying C++ object, managed by the SWIG runtime.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances are created through New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceISS3MUC3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceISS3MUC3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3___New_orig__()

    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceISS3MUC3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceISS3MUC3 self) -> itkBinaryMask3DMeshSourceISS3MUC3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_Clone(self)

    def SetObjectValue(self, _arg: 'short const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceISS3MUC3 self, short const _arg)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceISS3MUC3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceISS3MUC3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageSS3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceISS3MUC3 self, itkImageSS3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceISS3MUC3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceISS3MUC3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_GetRegionOfInterest(self)

    # Destructor hook invoked by the SWIG runtime when thisown is set.
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceISS3MUC3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceISS3MUC3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceISS3MUC3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_cast(obj)

    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceISS3MUC3

        Create a new object of the class itkBinaryMask3DMeshSourceISS3MUC3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkBinaryMask3DMeshSourceISS3MUC3.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkBinaryMask3DMeshSourceISS3MUC3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkBinaryMask3DMeshSourceISS3MUC3.__New_orig__()
        # itkTemplate.New assigns positional args to inputs and keyword args to Set* methods.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj

    New = staticmethod(New)
# Bind the itkBinaryMask3DMeshSourceISS3MUC3 proxy methods directly to the C
# extension entry points, then register the proxy class with the SWIG runtime
# type system (SWIG-generated boilerplate).
itkBinaryMask3DMeshSourceISS3MUC3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_Clone, None, itkBinaryMask3DMeshSourceISS3MUC3)
itkBinaryMask3DMeshSourceISS3MUC3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_SetObjectValue, None, itkBinaryMask3DMeshSourceISS3MUC3)
itkBinaryMask3DMeshSourceISS3MUC3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceISS3MUC3)
itkBinaryMask3DMeshSourceISS3MUC3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceISS3MUC3)
itkBinaryMask3DMeshSourceISS3MUC3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_SetInput, None, itkBinaryMask3DMeshSourceISS3MUC3)
itkBinaryMask3DMeshSourceISS3MUC3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceISS3MUC3)
itkBinaryMask3DMeshSourceISS3MUC3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceISS3MUC3)
itkBinaryMask3DMeshSourceISS3MUC3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_swigregister
itkBinaryMask3DMeshSourceISS3MUC3_swigregister(itkBinaryMask3DMeshSourceISS3MUC3)
def itkBinaryMask3DMeshSourceISS3MUC3___New_orig__() -> "itkBinaryMask3DMeshSourceISS3MUC3_Pointer":
    """Create a new itkBinaryMask3DMeshSourceISS3MUC3 via the wrapped C++ factory."""
    factory = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3___New_orig__
    return factory()
def itkBinaryMask3DMeshSourceISS3MUC3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceISS3MUC3 *":
    """Down-cast *obj* to itkBinaryMask3DMeshSourceISS3MUC3 through the C extension."""
    caster = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUC3_cast
    return caster(obj)
# SWIG-generated proxy for itk::BinaryMask3DMeshSource, template suffix
# "ISS3MUS3" (input image type itkImageSS3 per SetInput; object value type
# `short` per SetObjectValue; mesh template suffix MUS3).  Generated code:
# hand edits are lost when the wrapper is regenerated.
class itkBinaryMask3DMeshSourceISS3MUS3(itkImageToMeshFilterPython.itkImageToMeshFilterISS3MUS3):
    """Proxy of C++ itkBinaryMask3DMeshSourceISS3MUS3 class."""

    # Ownership flag for the underlying C++ object, managed by the SWIG runtime.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances are created through New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceISS3MUS3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceISS3MUS3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3___New_orig__()

    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceISS3MUS3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceISS3MUS3 self) -> itkBinaryMask3DMeshSourceISS3MUS3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_Clone(self)

    def SetObjectValue(self, _arg: 'short const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceISS3MUS3 self, short const _arg)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceISS3MUS3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceISS3MUS3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageSS3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceISS3MUS3 self, itkImageSS3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceISS3MUS3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceISS3MUS3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_GetRegionOfInterest(self)

    # Destructor hook invoked by the SWIG runtime when thisown is set.
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceISS3MUS3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceISS3MUS3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceISS3MUS3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_cast(obj)

    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceISS3MUS3

        Create a new object of the class itkBinaryMask3DMeshSourceISS3MUS3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkBinaryMask3DMeshSourceISS3MUS3.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkBinaryMask3DMeshSourceISS3MUS3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkBinaryMask3DMeshSourceISS3MUS3.__New_orig__()
        # itkTemplate.New assigns positional args to inputs and keyword args to Set* methods.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj

    New = staticmethod(New)
# Bind the itkBinaryMask3DMeshSourceISS3MUS3 proxy methods directly to the C
# extension entry points, then register the proxy class with the SWIG runtime
# type system (SWIG-generated boilerplate).
itkBinaryMask3DMeshSourceISS3MUS3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_Clone, None, itkBinaryMask3DMeshSourceISS3MUS3)
itkBinaryMask3DMeshSourceISS3MUS3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_SetObjectValue, None, itkBinaryMask3DMeshSourceISS3MUS3)
itkBinaryMask3DMeshSourceISS3MUS3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceISS3MUS3)
itkBinaryMask3DMeshSourceISS3MUS3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceISS3MUS3)
itkBinaryMask3DMeshSourceISS3MUS3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_SetInput, None, itkBinaryMask3DMeshSourceISS3MUS3)
itkBinaryMask3DMeshSourceISS3MUS3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceISS3MUS3)
itkBinaryMask3DMeshSourceISS3MUS3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceISS3MUS3)
itkBinaryMask3DMeshSourceISS3MUS3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_swigregister
itkBinaryMask3DMeshSourceISS3MUS3_swigregister(itkBinaryMask3DMeshSourceISS3MUS3)
def itkBinaryMask3DMeshSourceISS3MUS3___New_orig__() -> "itkBinaryMask3DMeshSourceISS3MUS3_Pointer":
    """Create a new itkBinaryMask3DMeshSourceISS3MUS3 via the wrapped C++ factory."""
    factory = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3___New_orig__
    return factory()
def itkBinaryMask3DMeshSourceISS3MUS3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceISS3MUS3 *":
    """Down-cast *obj* to itkBinaryMask3DMeshSourceISS3MUS3 through the C extension."""
    caster = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceISS3MUS3_cast
    return caster(obj)
# SWIG-generated proxy for itk::BinaryMask3DMeshSource, template suffix
# "IUC3MD3" (input image type itkImageUC3 per SetInput; object value type
# `unsigned char` per SetObjectValue; mesh template suffix MD3).  Generated
# code: hand edits are lost when the wrapper is regenerated.
class itkBinaryMask3DMeshSourceIUC3MD3(itkImageToMeshFilterPython.itkImageToMeshFilterIUC3MD3):
    """Proxy of C++ itkBinaryMask3DMeshSourceIUC3MD3 class."""

    # Ownership flag for the underlying C++ object, managed by the SWIG runtime.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances are created through New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceIUC3MD3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceIUC3MD3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3___New_orig__()

    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceIUC3MD3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceIUC3MD3 self) -> itkBinaryMask3DMeshSourceIUC3MD3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_Clone(self)

    def SetObjectValue(self, _arg: 'unsigned char const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceIUC3MD3 self, unsigned char const _arg)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceIUC3MD3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceIUC3MD3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageUC3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceIUC3MD3 self, itkImageUC3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceIUC3MD3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceIUC3MD3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_GetRegionOfInterest(self)

    # Destructor hook invoked by the SWIG runtime when thisown is set.
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceIUC3MD3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUC3MD3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUC3MD3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_cast(obj)

    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceIUC3MD3

        Create a new object of the class itkBinaryMask3DMeshSourceIUC3MD3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkBinaryMask3DMeshSourceIUC3MD3.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkBinaryMask3DMeshSourceIUC3MD3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkBinaryMask3DMeshSourceIUC3MD3.__New_orig__()
        # itkTemplate.New assigns positional args to inputs and keyword args to Set* methods.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj

    New = staticmethod(New)
# Bind the itkBinaryMask3DMeshSourceIUC3MD3 proxy methods directly to the C
# extension entry points, then register the proxy class with the SWIG runtime
# type system (SWIG-generated boilerplate).
itkBinaryMask3DMeshSourceIUC3MD3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_Clone, None, itkBinaryMask3DMeshSourceIUC3MD3)
itkBinaryMask3DMeshSourceIUC3MD3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_SetObjectValue, None, itkBinaryMask3DMeshSourceIUC3MD3)
itkBinaryMask3DMeshSourceIUC3MD3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceIUC3MD3)
itkBinaryMask3DMeshSourceIUC3MD3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceIUC3MD3)
itkBinaryMask3DMeshSourceIUC3MD3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_SetInput, None, itkBinaryMask3DMeshSourceIUC3MD3)
itkBinaryMask3DMeshSourceIUC3MD3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUC3MD3)
itkBinaryMask3DMeshSourceIUC3MD3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUC3MD3)
itkBinaryMask3DMeshSourceIUC3MD3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_swigregister
itkBinaryMask3DMeshSourceIUC3MD3_swigregister(itkBinaryMask3DMeshSourceIUC3MD3)
def itkBinaryMask3DMeshSourceIUC3MD3___New_orig__() -> "itkBinaryMask3DMeshSourceIUC3MD3_Pointer":
    """Create a new itkBinaryMask3DMeshSourceIUC3MD3 via the wrapped C++ factory."""
    factory = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3___New_orig__
    return factory()
def itkBinaryMask3DMeshSourceIUC3MD3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUC3MD3 *":
    """Down-cast *obj* to itkBinaryMask3DMeshSourceIUC3MD3 through the C extension."""
    caster = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MD3_cast
    return caster(obj)
# SWIG-generated proxy for itk::BinaryMask3DMeshSource, template suffix
# "IUC3MF3" (input image type itkImageUC3 per SetInput; object value type
# `unsigned char` per SetObjectValue; mesh template suffix MF3).  Generated
# code: hand edits are lost when the wrapper is regenerated.
class itkBinaryMask3DMeshSourceIUC3MF3(itkImageToMeshFilterPython.itkImageToMeshFilterIUC3MF3):
    """Proxy of C++ itkBinaryMask3DMeshSourceIUC3MF3 class."""

    # Ownership flag for the underlying C++ object, managed by the SWIG runtime.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances are created through New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceIUC3MF3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceIUC3MF3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3___New_orig__()

    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceIUC3MF3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceIUC3MF3 self) -> itkBinaryMask3DMeshSourceIUC3MF3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_Clone(self)

    def SetObjectValue(self, _arg: 'unsigned char const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceIUC3MF3 self, unsigned char const _arg)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceIUC3MF3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceIUC3MF3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageUC3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceIUC3MF3 self, itkImageUC3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceIUC3MF3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceIUC3MF3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_GetRegionOfInterest(self)

    # Destructor hook invoked by the SWIG runtime when thisown is set.
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceIUC3MF3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUC3MF3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUC3MF3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_cast(obj)

    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceIUC3MF3

        Create a new object of the class itkBinaryMask3DMeshSourceIUC3MF3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkBinaryMask3DMeshSourceIUC3MF3.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkBinaryMask3DMeshSourceIUC3MF3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkBinaryMask3DMeshSourceIUC3MF3.__New_orig__()
        # itkTemplate.New assigns positional args to inputs and keyword args to Set* methods.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj

    New = staticmethod(New)
# Bind the itkBinaryMask3DMeshSourceIUC3MF3 proxy methods directly to the C
# extension entry points, then register the proxy class with the SWIG runtime
# type system (SWIG-generated boilerplate).
itkBinaryMask3DMeshSourceIUC3MF3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_Clone, None, itkBinaryMask3DMeshSourceIUC3MF3)
itkBinaryMask3DMeshSourceIUC3MF3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_SetObjectValue, None, itkBinaryMask3DMeshSourceIUC3MF3)
itkBinaryMask3DMeshSourceIUC3MF3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceIUC3MF3)
itkBinaryMask3DMeshSourceIUC3MF3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceIUC3MF3)
itkBinaryMask3DMeshSourceIUC3MF3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_SetInput, None, itkBinaryMask3DMeshSourceIUC3MF3)
itkBinaryMask3DMeshSourceIUC3MF3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUC3MF3)
itkBinaryMask3DMeshSourceIUC3MF3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUC3MF3)
itkBinaryMask3DMeshSourceIUC3MF3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_swigregister
itkBinaryMask3DMeshSourceIUC3MF3_swigregister(itkBinaryMask3DMeshSourceIUC3MF3)
def itkBinaryMask3DMeshSourceIUC3MF3___New_orig__() -> "itkBinaryMask3DMeshSourceIUC3MF3_Pointer":
    """Create a new itkBinaryMask3DMeshSourceIUC3MF3 via the wrapped C++ factory."""
    factory = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3___New_orig__
    return factory()
def itkBinaryMask3DMeshSourceIUC3MF3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUC3MF3 *":
    """Down-cast *obj* to itkBinaryMask3DMeshSourceIUC3MF3 through the C extension."""
    caster = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MF3_cast
    return caster(obj)
# SWIG-generated proxy for itk::BinaryMask3DMeshSource, template suffix
# "IUC3MSS3" (input image type itkImageUC3 per SetInput; object value type
# `unsigned char` per SetObjectValue; mesh template suffix MSS3).  Generated
# code: hand edits are lost when the wrapper is regenerated.
class itkBinaryMask3DMeshSourceIUC3MSS3(itkImageToMeshFilterPython.itkImageToMeshFilterIUC3MSS3):
    """Proxy of C++ itkBinaryMask3DMeshSourceIUC3MSS3 class."""

    # Ownership flag for the underlying C++ object, managed by the SWIG runtime.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances are created through New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceIUC3MSS3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceIUC3MSS3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3___New_orig__()

    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceIUC3MSS3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceIUC3MSS3 self) -> itkBinaryMask3DMeshSourceIUC3MSS3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_Clone(self)

    def SetObjectValue(self, _arg: 'unsigned char const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceIUC3MSS3 self, unsigned char const _arg)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceIUC3MSS3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceIUC3MSS3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageUC3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceIUC3MSS3 self, itkImageUC3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceIUC3MSS3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceIUC3MSS3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_GetRegionOfInterest(self)

    # Destructor hook invoked by the SWIG runtime when thisown is set.
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceIUC3MSS3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUC3MSS3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUC3MSS3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_cast(obj)

    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceIUC3MSS3

        Create a new object of the class itkBinaryMask3DMeshSourceIUC3MSS3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkBinaryMask3DMeshSourceIUC3MSS3.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkBinaryMask3DMeshSourceIUC3MSS3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkBinaryMask3DMeshSourceIUC3MSS3.__New_orig__()
        # itkTemplate.New assigns positional args to inputs and keyword args to Set* methods.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj

    New = staticmethod(New)
# Bind the itkBinaryMask3DMeshSourceIUC3MSS3 proxy methods directly to the C
# extension entry points, then register the proxy class with the SWIG runtime
# type system (SWIG-generated boilerplate).
itkBinaryMask3DMeshSourceIUC3MSS3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_Clone, None, itkBinaryMask3DMeshSourceIUC3MSS3)
itkBinaryMask3DMeshSourceIUC3MSS3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_SetObjectValue, None, itkBinaryMask3DMeshSourceIUC3MSS3)
itkBinaryMask3DMeshSourceIUC3MSS3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceIUC3MSS3)
itkBinaryMask3DMeshSourceIUC3MSS3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceIUC3MSS3)
itkBinaryMask3DMeshSourceIUC3MSS3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_SetInput, None, itkBinaryMask3DMeshSourceIUC3MSS3)
itkBinaryMask3DMeshSourceIUC3MSS3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUC3MSS3)
itkBinaryMask3DMeshSourceIUC3MSS3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUC3MSS3)
itkBinaryMask3DMeshSourceIUC3MSS3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_swigregister
itkBinaryMask3DMeshSourceIUC3MSS3_swigregister(itkBinaryMask3DMeshSourceIUC3MSS3)
def itkBinaryMask3DMeshSourceIUC3MSS3___New_orig__() -> "itkBinaryMask3DMeshSourceIUC3MSS3_Pointer":
"""itkBinaryMask3DMeshSourceIUC3MSS3___New_orig__() -> itkBinaryMask3DMeshSourceIUC3MSS3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3___New_orig__()
def itkBinaryMask3DMeshSourceIUC3MSS3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUC3MSS3 *":
"""itkBinaryMask3DMeshSourceIUC3MSS3_cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUC3MSS3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MSS3_cast(obj)
# SWIG-generated proxy for a C++ itk::BinaryMask3DMeshSource instantiation.
# SetInput takes an itkImageUC3 (3-D unsigned-char image); the MUC3 suffix
# presumably denotes the output mesh type -- confirm against the ITK wrapping
# configuration.  Machine-generated code; do not hand-edit the wrapper calls.
class itkBinaryMask3DMeshSourceIUC3MUC3(itkImageToMeshFilterPython.itkImageToMeshFilterIUC3MUC3):
    """Proxy of C++ itkBinaryMask3DMeshSourceIUC3MUC3 class."""

    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is forbidden; instances come from New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceIUC3MUC3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceIUC3MUC3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceIUC3MUC3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceIUC3MUC3 self) -> itkBinaryMask3DMeshSourceIUC3MUC3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_Clone(self)

    def SetObjectValue(self, _arg: 'unsigned char const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceIUC3MUC3 self, unsigned char const _arg)"""
        # _arg is an unsigned-char pixel value (matches the IUC3 input type).
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceIUC3MUC3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceIUC3MUC3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageUC3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceIUC3MUC3 self, itkImageUC3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceIUC3MUC3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceIUC3MUC3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_GetRegionOfInterest(self)
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceIUC3MUC3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUC3MUC3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUC3MUC3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceIUC3MUC3
        Create a new object of the class itkBinaryMask3DMeshSourceIUC3MUC3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkBinaryMask3DMeshSourceIUC3MUC3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkBinaryMask3DMeshSourceIUC3MUC3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # Delegate argument handling to ITK's generic helper (itkTemplate.New).
        obj = itkBinaryMask3DMeshSourceIUC3MUC3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# SWIG glue for itkBinaryMask3DMeshSourceIUC3MUC3: bind the flat C functions
# from the extension module as instance methods of the proxy class, then
# register the proxy type with the SWIG runtime.  Machine-generated code.
itkBinaryMask3DMeshSourceIUC3MUC3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_Clone, None, itkBinaryMask3DMeshSourceIUC3MUC3)
itkBinaryMask3DMeshSourceIUC3MUC3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_SetObjectValue, None, itkBinaryMask3DMeshSourceIUC3MUC3)
itkBinaryMask3DMeshSourceIUC3MUC3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceIUC3MUC3)
itkBinaryMask3DMeshSourceIUC3MUC3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceIUC3MUC3)
itkBinaryMask3DMeshSourceIUC3MUC3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_SetInput, None, itkBinaryMask3DMeshSourceIUC3MUC3)
itkBinaryMask3DMeshSourceIUC3MUC3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUC3MUC3)
itkBinaryMask3DMeshSourceIUC3MUC3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUC3MUC3)
itkBinaryMask3DMeshSourceIUC3MUC3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_swigregister
itkBinaryMask3DMeshSourceIUC3MUC3_swigregister(itkBinaryMask3DMeshSourceIUC3MUC3)


# Module-level convenience alias for the class's smart-pointer factory.
def itkBinaryMask3DMeshSourceIUC3MUC3___New_orig__() -> "itkBinaryMask3DMeshSourceIUC3MUC3_Pointer":
    """itkBinaryMask3DMeshSourceIUC3MUC3___New_orig__() -> itkBinaryMask3DMeshSourceIUC3MUC3_Pointer"""
    return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3___New_orig__()


# Module-level down-cast from a generic itkLightObject to this proxy type.
def itkBinaryMask3DMeshSourceIUC3MUC3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUC3MUC3 *":
    """itkBinaryMask3DMeshSourceIUC3MUC3_cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUC3MUC3"""
    return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUC3_cast(obj)
# SWIG-generated proxy for a C++ itk::BinaryMask3DMeshSource instantiation.
# SetInput takes an itkImageUC3 (3-D unsigned-char image); the MUS3 suffix
# presumably denotes the output mesh type -- confirm against the ITK wrapping
# configuration.  Machine-generated code; do not hand-edit the wrapper calls.
class itkBinaryMask3DMeshSourceIUC3MUS3(itkImageToMeshFilterPython.itkImageToMeshFilterIUC3MUS3):
    """Proxy of C++ itkBinaryMask3DMeshSourceIUC3MUS3 class."""

    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is forbidden; instances come from New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceIUC3MUS3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceIUC3MUS3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceIUC3MUS3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceIUC3MUS3 self) -> itkBinaryMask3DMeshSourceIUC3MUS3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_Clone(self)

    def SetObjectValue(self, _arg: 'unsigned char const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceIUC3MUS3 self, unsigned char const _arg)"""
        # _arg is an unsigned-char pixel value (matches the IUC3 input type).
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceIUC3MUS3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceIUC3MUS3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageUC3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceIUC3MUS3 self, itkImageUC3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceIUC3MUS3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceIUC3MUS3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_GetRegionOfInterest(self)
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceIUC3MUS3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUC3MUS3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUC3MUS3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceIUC3MUS3
        Create a new object of the class itkBinaryMask3DMeshSourceIUC3MUS3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkBinaryMask3DMeshSourceIUC3MUS3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkBinaryMask3DMeshSourceIUC3MUS3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # Delegate argument handling to ITK's generic helper (itkTemplate.New).
        obj = itkBinaryMask3DMeshSourceIUC3MUS3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# SWIG glue for itkBinaryMask3DMeshSourceIUC3MUS3: bind the flat C functions
# from the extension module as instance methods of the proxy class, then
# register the proxy type with the SWIG runtime.  Machine-generated code.
itkBinaryMask3DMeshSourceIUC3MUS3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_Clone, None, itkBinaryMask3DMeshSourceIUC3MUS3)
itkBinaryMask3DMeshSourceIUC3MUS3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_SetObjectValue, None, itkBinaryMask3DMeshSourceIUC3MUS3)
itkBinaryMask3DMeshSourceIUC3MUS3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceIUC3MUS3)
itkBinaryMask3DMeshSourceIUC3MUS3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceIUC3MUS3)
itkBinaryMask3DMeshSourceIUC3MUS3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_SetInput, None, itkBinaryMask3DMeshSourceIUC3MUS3)
itkBinaryMask3DMeshSourceIUC3MUS3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUC3MUS3)
itkBinaryMask3DMeshSourceIUC3MUS3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUC3MUS3)
itkBinaryMask3DMeshSourceIUC3MUS3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_swigregister
itkBinaryMask3DMeshSourceIUC3MUS3_swigregister(itkBinaryMask3DMeshSourceIUC3MUS3)


# Module-level convenience alias for the class's smart-pointer factory.
def itkBinaryMask3DMeshSourceIUC3MUS3___New_orig__() -> "itkBinaryMask3DMeshSourceIUC3MUS3_Pointer":
    """itkBinaryMask3DMeshSourceIUC3MUS3___New_orig__() -> itkBinaryMask3DMeshSourceIUC3MUS3_Pointer"""
    return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3___New_orig__()


# Module-level down-cast from a generic itkLightObject to this proxy type.
def itkBinaryMask3DMeshSourceIUC3MUS3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUC3MUS3 *":
    """itkBinaryMask3DMeshSourceIUC3MUS3_cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUC3MUS3"""
    return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUC3MUS3_cast(obj)
# SWIG-generated proxy for a C++ itk::BinaryMask3DMeshSource instantiation.
# SetInput takes an itkImageUS3 (3-D unsigned-short image); the MD3 suffix
# presumably denotes the output mesh type -- confirm against the ITK wrapping
# configuration.  Machine-generated code; do not hand-edit the wrapper calls.
class itkBinaryMask3DMeshSourceIUS3MD3(itkImageToMeshFilterPython.itkImageToMeshFilterIUS3MD3):
    """Proxy of C++ itkBinaryMask3DMeshSourceIUS3MD3 class."""

    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is forbidden; instances come from New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceIUS3MD3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceIUS3MD3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceIUS3MD3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceIUS3MD3 self) -> itkBinaryMask3DMeshSourceIUS3MD3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_Clone(self)

    def SetObjectValue(self, _arg: 'unsigned short const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceIUS3MD3 self, unsigned short const _arg)"""
        # _arg is an unsigned-short pixel value (matches the IUS3 input type).
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceIUS3MD3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceIUS3MD3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageUS3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceIUS3MD3 self, itkImageUS3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceIUS3MD3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceIUS3MD3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_GetRegionOfInterest(self)
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceIUS3MD3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUS3MD3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUS3MD3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceIUS3MD3
        Create a new object of the class itkBinaryMask3DMeshSourceIUS3MD3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkBinaryMask3DMeshSourceIUS3MD3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkBinaryMask3DMeshSourceIUS3MD3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # Delegate argument handling to ITK's generic helper (itkTemplate.New).
        obj = itkBinaryMask3DMeshSourceIUS3MD3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# SWIG glue for itkBinaryMask3DMeshSourceIUS3MD3: bind the flat C functions
# from the extension module as instance methods of the proxy class, then
# register the proxy type with the SWIG runtime.  Machine-generated code.
itkBinaryMask3DMeshSourceIUS3MD3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_Clone, None, itkBinaryMask3DMeshSourceIUS3MD3)
itkBinaryMask3DMeshSourceIUS3MD3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_SetObjectValue, None, itkBinaryMask3DMeshSourceIUS3MD3)
itkBinaryMask3DMeshSourceIUS3MD3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceIUS3MD3)
itkBinaryMask3DMeshSourceIUS3MD3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceIUS3MD3)
itkBinaryMask3DMeshSourceIUS3MD3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_SetInput, None, itkBinaryMask3DMeshSourceIUS3MD3)
itkBinaryMask3DMeshSourceIUS3MD3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUS3MD3)
itkBinaryMask3DMeshSourceIUS3MD3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUS3MD3)
itkBinaryMask3DMeshSourceIUS3MD3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_swigregister
itkBinaryMask3DMeshSourceIUS3MD3_swigregister(itkBinaryMask3DMeshSourceIUS3MD3)


# Module-level convenience alias for the class's smart-pointer factory.
def itkBinaryMask3DMeshSourceIUS3MD3___New_orig__() -> "itkBinaryMask3DMeshSourceIUS3MD3_Pointer":
    """itkBinaryMask3DMeshSourceIUS3MD3___New_orig__() -> itkBinaryMask3DMeshSourceIUS3MD3_Pointer"""
    return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3___New_orig__()


# Module-level down-cast from a generic itkLightObject to this proxy type.
def itkBinaryMask3DMeshSourceIUS3MD3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUS3MD3 *":
    """itkBinaryMask3DMeshSourceIUS3MD3_cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUS3MD3"""
    return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MD3_cast(obj)
# SWIG-generated proxy for a C++ itk::BinaryMask3DMeshSource instantiation.
# SetInput takes an itkImageUS3 (3-D unsigned-short image); the MF3 suffix
# presumably denotes the output mesh type -- confirm against the ITK wrapping
# configuration.  Machine-generated code; do not hand-edit the wrapper calls.
class itkBinaryMask3DMeshSourceIUS3MF3(itkImageToMeshFilterPython.itkImageToMeshFilterIUS3MF3):
    """Proxy of C++ itkBinaryMask3DMeshSourceIUS3MF3 class."""

    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is forbidden; instances come from New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceIUS3MF3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceIUS3MF3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceIUS3MF3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceIUS3MF3 self) -> itkBinaryMask3DMeshSourceIUS3MF3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_Clone(self)

    def SetObjectValue(self, _arg: 'unsigned short const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceIUS3MF3 self, unsigned short const _arg)"""
        # _arg is an unsigned-short pixel value (matches the IUS3 input type).
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceIUS3MF3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceIUS3MF3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageUS3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceIUS3MF3 self, itkImageUS3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceIUS3MF3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceIUS3MF3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_GetRegionOfInterest(self)
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceIUS3MF3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUS3MF3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUS3MF3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceIUS3MF3
        Create a new object of the class itkBinaryMask3DMeshSourceIUS3MF3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkBinaryMask3DMeshSourceIUS3MF3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkBinaryMask3DMeshSourceIUS3MF3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # Delegate argument handling to ITK's generic helper (itkTemplate.New).
        obj = itkBinaryMask3DMeshSourceIUS3MF3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# SWIG glue for itkBinaryMask3DMeshSourceIUS3MF3: bind the flat C functions
# from the extension module as instance methods of the proxy class, then
# register the proxy type with the SWIG runtime.  Machine-generated code.
itkBinaryMask3DMeshSourceIUS3MF3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_Clone, None, itkBinaryMask3DMeshSourceIUS3MF3)
itkBinaryMask3DMeshSourceIUS3MF3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_SetObjectValue, None, itkBinaryMask3DMeshSourceIUS3MF3)
itkBinaryMask3DMeshSourceIUS3MF3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceIUS3MF3)
itkBinaryMask3DMeshSourceIUS3MF3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceIUS3MF3)
itkBinaryMask3DMeshSourceIUS3MF3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_SetInput, None, itkBinaryMask3DMeshSourceIUS3MF3)
itkBinaryMask3DMeshSourceIUS3MF3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUS3MF3)
itkBinaryMask3DMeshSourceIUS3MF3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUS3MF3)
itkBinaryMask3DMeshSourceIUS3MF3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_swigregister
itkBinaryMask3DMeshSourceIUS3MF3_swigregister(itkBinaryMask3DMeshSourceIUS3MF3)


# Module-level convenience alias for the class's smart-pointer factory.
def itkBinaryMask3DMeshSourceIUS3MF3___New_orig__() -> "itkBinaryMask3DMeshSourceIUS3MF3_Pointer":
    """itkBinaryMask3DMeshSourceIUS3MF3___New_orig__() -> itkBinaryMask3DMeshSourceIUS3MF3_Pointer"""
    return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3___New_orig__()


# Module-level down-cast from a generic itkLightObject to this proxy type.
def itkBinaryMask3DMeshSourceIUS3MF3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUS3MF3 *":
    """itkBinaryMask3DMeshSourceIUS3MF3_cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUS3MF3"""
    return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MF3_cast(obj)
# SWIG-generated proxy for a C++ itk::BinaryMask3DMeshSource instantiation.
# SetInput takes an itkImageUS3 (3-D unsigned-short image); the MSS3 suffix
# presumably denotes the output mesh type -- confirm against the ITK wrapping
# configuration.  Machine-generated code; do not hand-edit the wrapper calls.
class itkBinaryMask3DMeshSourceIUS3MSS3(itkImageToMeshFilterPython.itkImageToMeshFilterIUS3MSS3):
    """Proxy of C++ itkBinaryMask3DMeshSourceIUS3MSS3 class."""

    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is forbidden; instances come from New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceIUS3MSS3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceIUS3MSS3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceIUS3MSS3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceIUS3MSS3 self) -> itkBinaryMask3DMeshSourceIUS3MSS3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_Clone(self)

    def SetObjectValue(self, _arg: 'unsigned short const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceIUS3MSS3 self, unsigned short const _arg)"""
        # _arg is an unsigned-short pixel value (matches the IUS3 input type).
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceIUS3MSS3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceIUS3MSS3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageUS3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceIUS3MSS3 self, itkImageUS3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceIUS3MSS3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceIUS3MSS3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_GetRegionOfInterest(self)
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceIUS3MSS3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUS3MSS3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUS3MSS3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceIUS3MSS3
        Create a new object of the class itkBinaryMask3DMeshSourceIUS3MSS3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkBinaryMask3DMeshSourceIUS3MSS3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkBinaryMask3DMeshSourceIUS3MSS3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # Delegate argument handling to ITK's generic helper (itkTemplate.New).
        obj = itkBinaryMask3DMeshSourceIUS3MSS3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# SWIG glue for itkBinaryMask3DMeshSourceIUS3MSS3: bind the flat C functions
# from the extension module as instance methods of the proxy class, then
# register the proxy type with the SWIG runtime.  Machine-generated code.
itkBinaryMask3DMeshSourceIUS3MSS3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_Clone, None, itkBinaryMask3DMeshSourceIUS3MSS3)
itkBinaryMask3DMeshSourceIUS3MSS3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_SetObjectValue, None, itkBinaryMask3DMeshSourceIUS3MSS3)
itkBinaryMask3DMeshSourceIUS3MSS3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceIUS3MSS3)
itkBinaryMask3DMeshSourceIUS3MSS3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceIUS3MSS3)
itkBinaryMask3DMeshSourceIUS3MSS3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_SetInput, None, itkBinaryMask3DMeshSourceIUS3MSS3)
itkBinaryMask3DMeshSourceIUS3MSS3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUS3MSS3)
itkBinaryMask3DMeshSourceIUS3MSS3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUS3MSS3)
itkBinaryMask3DMeshSourceIUS3MSS3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_swigregister
itkBinaryMask3DMeshSourceIUS3MSS3_swigregister(itkBinaryMask3DMeshSourceIUS3MSS3)


# Module-level convenience alias for the class's smart-pointer factory.
def itkBinaryMask3DMeshSourceIUS3MSS3___New_orig__() -> "itkBinaryMask3DMeshSourceIUS3MSS3_Pointer":
    """itkBinaryMask3DMeshSourceIUS3MSS3___New_orig__() -> itkBinaryMask3DMeshSourceIUS3MSS3_Pointer"""
    return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3___New_orig__()


# Module-level down-cast from a generic itkLightObject to this proxy type.
def itkBinaryMask3DMeshSourceIUS3MSS3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUS3MSS3 *":
    """itkBinaryMask3DMeshSourceIUS3MSS3_cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUS3MSS3"""
    return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MSS3_cast(obj)
# SWIG-generated proxy for a C++ itk::BinaryMask3DMeshSource instantiation.
# SetInput takes an itkImageUS3 (3-D unsigned-short image); the MUC3 suffix
# presumably denotes the output mesh type -- confirm against the ITK wrapping
# configuration.  Machine-generated code; do not hand-edit the wrapper calls.
class itkBinaryMask3DMeshSourceIUS3MUC3(itkImageToMeshFilterPython.itkImageToMeshFilterIUS3MUC3):
    """Proxy of C++ itkBinaryMask3DMeshSourceIUS3MUC3 class."""

    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Direct construction is forbidden; instances come from New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkBinaryMask3DMeshSourceIUS3MUC3_Pointer":
        """__New_orig__() -> itkBinaryMask3DMeshSourceIUS3MUC3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkBinaryMask3DMeshSourceIUS3MUC3_Pointer":
        """Clone(itkBinaryMask3DMeshSourceIUS3MUC3 self) -> itkBinaryMask3DMeshSourceIUS3MUC3_Pointer"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_Clone(self)

    def SetObjectValue(self, _arg: 'unsigned short const') -> "void":
        """SetObjectValue(itkBinaryMask3DMeshSourceIUS3MUC3 self, unsigned short const _arg)"""
        # _arg is an unsigned-short pixel value (matches the IUS3 input type).
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_SetObjectValue(self, _arg)

    def GetNumberOfNodes(self) -> "unsigned long long":
        """GetNumberOfNodes(itkBinaryMask3DMeshSourceIUS3MUC3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_GetNumberOfNodes(self)

    def GetNumberOfCells(self) -> "unsigned long long":
        """GetNumberOfCells(itkBinaryMask3DMeshSourceIUS3MUC3 self) -> unsigned long long"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_GetNumberOfCells(self)

    def SetInput(self, inputImage: 'itkImageUS3') -> "void":
        """SetInput(itkBinaryMask3DMeshSourceIUS3MUC3 self, itkImageUS3 inputImage)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_SetInput(self, inputImage)

    def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
        """SetRegionOfInterest(itkBinaryMask3DMeshSourceIUS3MUC3 self, itkImageRegion3 iRegion)"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_SetRegionOfInterest(self, iRegion)

    def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
        """GetRegionOfInterest(itkBinaryMask3DMeshSourceIUS3MUC3 self) -> itkImageRegion3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_GetRegionOfInterest(self)
    __swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceIUS3MUC3

    def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUS3MUC3 *":
        """cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUS3MUC3"""
        return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkBinaryMask3DMeshSourceIUS3MUC3
        Create a new object of the class itkBinaryMask3DMeshSourceIUS3MUC3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkBinaryMask3DMeshSourceIUS3MUC3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkBinaryMask3DMeshSourceIUS3MUC3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        # Delegate argument handling to ITK's generic helper (itkTemplate.New).
        obj = itkBinaryMask3DMeshSourceIUS3MUC3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
itkBinaryMask3DMeshSourceIUS3MUC3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_Clone, None, itkBinaryMask3DMeshSourceIUS3MUC3)
itkBinaryMask3DMeshSourceIUS3MUC3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_SetObjectValue, None, itkBinaryMask3DMeshSourceIUS3MUC3)
itkBinaryMask3DMeshSourceIUS3MUC3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceIUS3MUC3)
itkBinaryMask3DMeshSourceIUS3MUC3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceIUS3MUC3)
itkBinaryMask3DMeshSourceIUS3MUC3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_SetInput, None, itkBinaryMask3DMeshSourceIUS3MUC3)
itkBinaryMask3DMeshSourceIUS3MUC3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUS3MUC3)
itkBinaryMask3DMeshSourceIUS3MUC3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUS3MUC3)
itkBinaryMask3DMeshSourceIUS3MUC3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_swigregister
itkBinaryMask3DMeshSourceIUS3MUC3_swigregister(itkBinaryMask3DMeshSourceIUS3MUC3)
def itkBinaryMask3DMeshSourceIUS3MUC3___New_orig__() -> "itkBinaryMask3DMeshSourceIUS3MUC3_Pointer":
"""itkBinaryMask3DMeshSourceIUS3MUC3___New_orig__() -> itkBinaryMask3DMeshSourceIUS3MUC3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3___New_orig__()
def itkBinaryMask3DMeshSourceIUS3MUC3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUS3MUC3 *":
"""itkBinaryMask3DMeshSourceIUS3MUC3_cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUS3MUC3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUC3_cast(obj)
class itkBinaryMask3DMeshSourceIUS3MUS3(itkImageToMeshFilterPython.itkImageToMeshFilterIUS3MUS3):
"""Proxy of C++ itkBinaryMask3DMeshSourceIUS3MUS3 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkBinaryMask3DMeshSourceIUS3MUS3_Pointer":
"""__New_orig__() -> itkBinaryMask3DMeshSourceIUS3MUS3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkBinaryMask3DMeshSourceIUS3MUS3_Pointer":
"""Clone(itkBinaryMask3DMeshSourceIUS3MUS3 self) -> itkBinaryMask3DMeshSourceIUS3MUS3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_Clone(self)
def SetObjectValue(self, _arg: 'unsigned short const') -> "void":
"""SetObjectValue(itkBinaryMask3DMeshSourceIUS3MUS3 self, unsigned short const _arg)"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_SetObjectValue(self, _arg)
def GetNumberOfNodes(self) -> "unsigned long long":
"""GetNumberOfNodes(itkBinaryMask3DMeshSourceIUS3MUS3 self) -> unsigned long long"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_GetNumberOfNodes(self)
def GetNumberOfCells(self) -> "unsigned long long":
"""GetNumberOfCells(itkBinaryMask3DMeshSourceIUS3MUS3 self) -> unsigned long long"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_GetNumberOfCells(self)
def SetInput(self, inputImage: 'itkImageUS3') -> "void":
"""SetInput(itkBinaryMask3DMeshSourceIUS3MUS3 self, itkImageUS3 inputImage)"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_SetInput(self, inputImage)
def SetRegionOfInterest(self, iRegion: 'itkImageRegion3') -> "void":
"""SetRegionOfInterest(itkBinaryMask3DMeshSourceIUS3MUS3 self, itkImageRegion3 iRegion)"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_SetRegionOfInterest(self, iRegion)
def GetRegionOfInterest(self) -> "itkImageRegion3 const &":
"""GetRegionOfInterest(itkBinaryMask3DMeshSourceIUS3MUS3 self) -> itkImageRegion3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_GetRegionOfInterest(self)
__swig_destroy__ = _itkBinaryMask3DMeshSourcePython.delete_itkBinaryMask3DMeshSourceIUS3MUS3
def cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUS3MUS3 *":
"""cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUS3MUS3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkBinaryMask3DMeshSourceIUS3MUS3
Create a new object of the class itkBinaryMask3DMeshSourceIUS3MUS3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkBinaryMask3DMeshSourceIUS3MUS3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkBinaryMask3DMeshSourceIUS3MUS3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkBinaryMask3DMeshSourceIUS3MUS3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkBinaryMask3DMeshSourceIUS3MUS3.Clone = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_Clone, None, itkBinaryMask3DMeshSourceIUS3MUS3)
itkBinaryMask3DMeshSourceIUS3MUS3.SetObjectValue = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_SetObjectValue, None, itkBinaryMask3DMeshSourceIUS3MUS3)
itkBinaryMask3DMeshSourceIUS3MUS3.GetNumberOfNodes = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_GetNumberOfNodes, None, itkBinaryMask3DMeshSourceIUS3MUS3)
itkBinaryMask3DMeshSourceIUS3MUS3.GetNumberOfCells = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_GetNumberOfCells, None, itkBinaryMask3DMeshSourceIUS3MUS3)
itkBinaryMask3DMeshSourceIUS3MUS3.SetInput = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_SetInput, None, itkBinaryMask3DMeshSourceIUS3MUS3)
itkBinaryMask3DMeshSourceIUS3MUS3.SetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_SetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUS3MUS3)
itkBinaryMask3DMeshSourceIUS3MUS3.GetRegionOfInterest = new_instancemethod(_itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_GetRegionOfInterest, None, itkBinaryMask3DMeshSourceIUS3MUS3)
itkBinaryMask3DMeshSourceIUS3MUS3_swigregister = _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_swigregister
itkBinaryMask3DMeshSourceIUS3MUS3_swigregister(itkBinaryMask3DMeshSourceIUS3MUS3)
def itkBinaryMask3DMeshSourceIUS3MUS3___New_orig__() -> "itkBinaryMask3DMeshSourceIUS3MUS3_Pointer":
"""itkBinaryMask3DMeshSourceIUS3MUS3___New_orig__() -> itkBinaryMask3DMeshSourceIUS3MUS3_Pointer"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3___New_orig__()
def itkBinaryMask3DMeshSourceIUS3MUS3_cast(obj: 'itkLightObject') -> "itkBinaryMask3DMeshSourceIUS3MUS3 *":
"""itkBinaryMask3DMeshSourceIUS3MUS3_cast(itkLightObject obj) -> itkBinaryMask3DMeshSourceIUS3MUS3"""
return _itkBinaryMask3DMeshSourcePython.itkBinaryMask3DMeshSourceIUS3MUS3_cast(obj)
def binary_mask3_d_mesh_source(*args, **kwargs):
"""Procedural interface for BinaryMask3DMeshSource"""
import itk
instance = itk.BinaryMask3DMeshSource.New(*args, **kwargs)
return instance.__internal_call__()
def binary_mask3_d_mesh_source_init_docstring():
import itk
import itkTemplate
if isinstance(itk.BinaryMask3DMeshSource, itkTemplate.itkTemplate):
binary_mask3_d_mesh_source.__doc__ = itk.BinaryMask3DMeshSource.values()[0].__doc__
else:
binary_mask3_d_mesh_source.__doc__ = itk.BinaryMask3DMeshSource.__doc__
| [
"[email protected]"
] | |
cbfdc2132564815458464e1f64c86110d7b3e056 | db3d4aa39bc6b3f521ba21afbfedd8164a68e4d5 | /asgiref/conformance_async.py | 5aeeeeacffef2d0bc4747777b7306d1a0c04b24e | [
"BSD-3-Clause"
] | permissive | EdwardBetts/asgiref | 808c55b5089d9c9d8ae33583b0a1728a6313f930 | 0ad52409735109a85238b5b068c77c0f4e60e59e | refs/heads/master | 2021-01-21T22:19:00.404420 | 2017-08-23T03:33:56 | 2017-08-23T03:33:56 | 102,147,619 | 0 | 0 | null | 2017-09-01T19:45:30 | 2017-09-01T19:45:30 | null | UTF-8 | Python | false | false | 743 | py | import asyncio
def test_receive_async(self):
"""
Tests that the asynchronous receive() method works.
"""
# Make sure we can run asyncio code
self.skip_if_no_extension("async")
try:
import asyncio
except ImportError:
raise unittest.SkipTest("No asyncio")
# Test that receive works
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
@asyncio.coroutine
def test():
self.channel_layer.send("test_async", {"is it": "working"})
channel, message = yield from self.channel_layer.receive_async(["test_async"])
self.assertEqual(channel, "test_async")
self.assertEqual(message, {"is it": "working"})
self.loop.run_until_complete(test())
| [
"[email protected]"
] | |
de6ff1b606ca0939e9cc25ea37d7b88e7f76c315 | b9b19792e1890b56679dc167fb99f9612af477f7 | /deeppy/graph/nodes.py | 17893ad9ede4ed472d8bf3fcd5e5d7a6a94a5bf0 | [
"MIT"
] | permissive | fullstackenviormentss/deeppy_experimental | 7990674a8eda0655671940d3baf25256af8a384b | dc06e294e37a30340c7d02ac12c4d00653baf96c | refs/heads/master | 2020-03-18T22:01:01.964338 | 2015-08-25T18:15:28 | 2015-08-25T18:15:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from ..base import Model, ParamMixin, PickleMixin, PhaseMixin
class Node(PhaseMixin, PickleMixin):
def _setup(self, **shapes):
pass
def fprop(self, **arrays):
pass
def bprop(self, **arrays):
pass
def out_shapes(self, **shapes):
pass
class SupervisedBatch(Node):
def __init__(self):
self.name = 'input'
pass
def _setup(self, x_shape, y_shape):
pass
def fprop(self, x, y):
return {'samples': x, 'labels': y}
def bprop(self, samples_grad, labels_grad):
pass
def out_shapes(self, x_shape, y_shape):
return {'samples': x_shape, 'labels': y_shape}
| [
"[email protected]"
] | |
98809dfea4ff4dba9a3ba0d6f49603d5b7cd8938 | f1d67722dcd4c2209eedc0a61e5ea0ee27c95470 | /examples/farmer/farmer_ama.py | 00a79662b473eef48f1d277a7ec361a36bbfb408 | [] | no_license | wangcj05/mpi-sppy | 08204019b466da5e0812b16dd5cb53da1bdbd793 | 42aff4c11dc42fcba8a9520da00e48c6e9ab7d85 | refs/heads/main | 2023-08-25T04:36:58.606490 | 2021-11-01T21:40:14 | 2021-11-01T21:40:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | # Copyright 2021 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
# This software is distributed under the 3-clause BSD License.
"""
An example of using amalgomator and solving directly the EF
To execute this:
python farmer_ama.py --num-scens=10 --crops-multiplier=3 --farmer-with-integer
WARNING:
num-scens must be specified !
"""
import mpisppy.utils.amalgomator as amalgomator
def main():
solution_files = {"first_stage_solution":"farmer_first_stage.csv",
}
ama_options = {"EF-2stage": True, # We are solving directly the EF
"write_solution":solution_files}
#The module can be a local file
ama = amalgomator.from_module("afarmer", ama_options)
ama.run()
print("first_stage_solution=", ama.first_stage_solution)
print("inner bound=", ama.best_inner_bound)
print("outer bound=", ama.best_outer_bound)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
4ede039a5f8e824cee79fba2efaf8cbcedf0a1bc | 11195ea809c363f834f3fb31eb7de26437e2eb53 | /course3/reachability.py | a1a09b13ad880b57067f789a2d3918fe4ab64d7b | [
"MIT"
] | permissive | ropable/algorithmic_toolbox | e8d517dbc00541ef10fdc8c3e586194ebbd1b30b | b4dcf4fda19c394da2baa6eced0732bf50585237 | refs/heads/master | 2021-09-09T12:15:37.378207 | 2018-03-16T01:58:41 | 2018-03-16T01:58:41 | 110,786,531 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | # python3
import sys
def reach(adj, x, y):
# Determine if x can reach y by exploring all of the nodes that x can reach.
visited = [False] * len(adj) # List of all the edges, and whether they have been visited.
return explore(adj, x, y, visited)
def explore(adj, x, y, visited):
# Explore each edge pair.
if x == y: # Nodes are the same: we've reached y.
return 1
visited[x] = True
for i in range(len(adj[x])):
if not visited[adj[x][i]]: # Recurse into each node of the pair.
if explore(adj, adj[x][i], y, visited):
return 1
return 0
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n, m = data[0:2] # No. of vertices and edges.
data = data[2:]
edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))
x, y = data[2 * m:] # u and v - is there a path between these?
x, y = x - 1, y - 1 # They are zero-indexed.
adj = [[] for _ in range(n)]
for (a, b) in edges:
adj[a - 1].append(b - 1)
adj[b - 1].append(a - 1)
print(reach(adj, x, y))
| [
"[email protected]"
] | |
f4a6ff61bd09f097e3f78db368e0296793dad68d | f1e9f557c5d724dcabbfa17903de93bb82767e35 | /py_opencv_playrtsp.py | 48961e3539f940982eb4128f70fc2a9f5ce1a858 | [] | no_license | gregsheu/python | e5e9ff83dc0ce90541591e726c940e8a1f71a3d4 | 4a77295d58a522974ee85b201ab99cdbe410fd08 | refs/heads/master | 2023-08-18T08:30:15.611727 | 2023-08-08T06:55:44 | 2023-08-08T06:55:44 | 181,270,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | import cv2
import ffmpeg
import time
vcap = cv2.VideoCapture("rtsp://admin:[email protected]:554/cam/realmonitor?channel=1&subtype=0")
while(1):
ret, frame = vcap.read()
print(frame.tobytes())
cv2.imshow('channel2', frame)
cv2.waitKey(1)
| [
"[email protected]"
] | |
2daa7490a61cc2719677837eea96644bd3d7879a | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/ListDynamicImageRequest.py | ebe94cb925af50eeef533e6e56955b712cd79567 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 1,447 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvod.endpoint import endpoint_data
class ListDynamicImageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'vod', '2017-03-21', 'ListDynamicImage','vod')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_VideoId(self): # String
return self.get_query_params().get('VideoId')
def set_VideoId(self, VideoId): # String
self.add_query_param('VideoId', VideoId)
| [
"[email protected]"
] | |
c78e0f7af5816b19efcea2334f9803e925c03c0c | d25eebb25595c25b73fdc64447f7cf5998204b0d | /gtkApi/ReportEditor.py | ca6d3ae8746a0c2d9fb7a526f2f18423739f3bc5 | [] | no_license | BackupTheBerlios/baseui | a3867c0cc4aa30cf2a7b0dcaf9dbeec68dc5ef0b | a8296aa42f0de42c18f7dfb5d20966bad695709b | refs/heads/master | 2021-01-15T22:28:52.114731 | 2012-12-05T16:31:03 | 2012-12-05T16:31:03 | 39,894,612 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#===============================================================================
# ReportEditor module.
# by Mark Muzenhardt, published under LGPL-License.
#===============================================================================
import pygtk
pygtk.require('2.0')
import gtk
class ReportEditor:
def __init__(self):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_title("Translation Editor")
window.connect("destroy", lambda w: gtk.main_quit())
vbox = gtk.VBox()
window.add(vbox)
toolbar = gtk.Toolbar()
vbox.pack_start(toolbar, expand=False, fill=True)
button_print = gtk.Button('Druck')
button_print.connect("clicked", self.on_button_print_clicked)
toolbar.add(button_print)
button_backward = gtk.Button('<-')
toolbar.add(button_backward)
button_forward = gtk.Button('->')
toolbar.add(button_forward)
button_cancel = gtk.Button('Abbruch')
button_cancel.connect("clicked", lambda w: gtk.main_quit())
toolbar.add(button_cancel)
label = gtk.Label('NIIX')
vbox.add(label)
window.show_all()
# This methods are doing the initial --------------------------------------
def on_button_print_clicked(self, widget=None, data=None):
pass
# Start the GTK mainloop ------------------------------------------------------
def main():
gtk.main()
return 0
if __name__ == "__main__":
ReportEditor()
main()
| [
"devnull@localhost"
] | devnull@localhost |
462a7046e8a050379388b4c55914328f5e45deca | a34df0359b8aa5ef03c010fe91229e4cbb765d1f | /Step X/twilio/rest/studio/v1/flow/engagement/__init__.py | fe27c9983cdfb3ea2a9b071aeb5806fec9df053a | [
"Unlicense"
] | permissive | wrestlerdude/QuackathonRubeGoldberg2019 | f881d6c131ca8349946d01be29ff4ad272e11159 | fdaafb79add30a3de075fa0ab9c7c88900081f65 | refs/heads/master | 2020-04-20T11:52:01.937292 | 2019-02-04T18:10:54 | 2019-02-04T18:10:54 | 168,828,471 | 1 | 0 | Unlicense | 2019-02-02T21:50:33 | 2019-02-02T12:16:32 | PHP | UTF-8 | Python | false | false | 16,098 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.studio.v1.flow.engagement.engagement_context import EngagementContextList
from twilio.rest.studio.v1.flow.engagement.step import StepList
class EngagementList(ListResource):
""" """
def __init__(self, version, flow_sid):
"""
Initialize the EngagementList
:param Version version: Version that contains the resource
:param flow_sid: Flow Sid.
:returns: twilio.rest.studio.v1.flow.engagement.EngagementList
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementList
"""
super(EngagementList, self).__init__(version)
# Path Solution
self._solution = {'flow_sid': flow_sid, }
self._uri = '/Flows/{flow_sid}/Engagements'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams EngagementInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.studio.v1.flow.engagement.EngagementInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists EngagementInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.studio.v1.flow.engagement.EngagementInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of EngagementInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return EngagementPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of EngagementInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return EngagementPage(self._version, response, self._solution)
def create(self, to, from_, parameters=values.unset):
"""
Create a new EngagementInstance
:param unicode to: The Contact phone number to start a Studio Flow Engagement.
:param unicode from_: The Twilio phone number to send messages or initiate calls from during the Flow Engagement.
:param dict parameters: JSON data that will be added to your flow's context and can accessed as variables inside your flow.
:returns: Newly created EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
"""
data = values.of({'To': to, 'From': from_, 'Parameters': serialize.object(parameters), })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return EngagementInstance(self._version, payload, flow_sid=self._solution['flow_sid'], )
def get(self, sid):
"""
Constructs a EngagementContext
:param sid: Engagement Sid.
:returns: twilio.rest.studio.v1.flow.engagement.EngagementContext
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
"""
return EngagementContext(self._version, flow_sid=self._solution['flow_sid'], sid=sid, )
def __call__(self, sid):
"""
Constructs a EngagementContext
:param sid: Engagement Sid.
:returns: twilio.rest.studio.v1.flow.engagement.EngagementContext
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
"""
return EngagementContext(self._version, flow_sid=self._solution['flow_sid'], sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Studio.V1.EngagementList>'
class EngagementPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the EngagementPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param flow_sid: Flow Sid.
:returns: twilio.rest.studio.v1.flow.engagement.EngagementPage
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementPage
"""
super(EngagementPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of EngagementInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.studio.v1.flow.engagement.EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
"""
return EngagementInstance(self._version, payload, flow_sid=self._solution['flow_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Studio.V1.EngagementPage>'
class EngagementContext(InstanceContext):
""" """
def __init__(self, version, flow_sid, sid):
"""
Initialize the EngagementContext
:param Version version: Version that contains the resource
:param flow_sid: Flow Sid.
:param sid: Engagement Sid.
:returns: twilio.rest.studio.v1.flow.engagement.EngagementContext
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
"""
super(EngagementContext, self).__init__(version)
# Path Solution
self._solution = {'flow_sid': flow_sid, 'sid': sid, }
self._uri = '/Flows/{flow_sid}/Engagements/{sid}'.format(**self._solution)
# Dependents
self._steps = None
self._engagement_context = None
def fetch(self):
"""
Fetch a EngagementInstance
:returns: Fetched EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return EngagementInstance(
self._version,
payload,
flow_sid=self._solution['flow_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the EngagementInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
@property
def steps(self):
"""
Access the steps
:returns: twilio.rest.studio.v1.flow.engagement.step.StepList
:rtype: twilio.rest.studio.v1.flow.engagement.step.StepList
"""
if self._steps is None:
self._steps = StepList(
self._version,
flow_sid=self._solution['flow_sid'],
engagement_sid=self._solution['sid'],
)
return self._steps
@property
def engagement_context(self):
"""
Access the engagement_context
:returns: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
:rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
"""
if self._engagement_context is None:
self._engagement_context = EngagementContextList(
self._version,
flow_sid=self._solution['flow_sid'],
engagement_sid=self._solution['sid'],
)
return self._engagement_context
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Studio.V1.EngagementContext {}>'.format(context)
class EngagementInstance(InstanceResource):
""" """
class Status(object):
ACTIVE = "active"
ENDED = "ended"
def __init__(self, version, payload, flow_sid, sid=None):
"""
Initialize the EngagementInstance
:returns: twilio.rest.studio.v1.flow.engagement.EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
"""
super(EngagementInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'account_sid': payload['account_sid'],
'flow_sid': payload['flow_sid'],
'contact_sid': payload['contact_sid'],
'contact_channel_address': payload['contact_channel_address'],
'context': payload['context'],
'status': payload['status'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'url': payload['url'],
'links': payload['links'],
}
# Context
self._context = None
self._solution = {'flow_sid': flow_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: EngagementContext for this EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
"""
if self._context is None:
self._context = EngagementContext(
self._version,
flow_sid=self._solution['flow_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: A string that uniquely identifies this Engagement.
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: Account Sid.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def flow_sid(self):
"""
:returns: Flow Sid.
:rtype: unicode
"""
return self._properties['flow_sid']
@property
def contact_sid(self):
"""
:returns: Contact Sid.
:rtype: unicode
"""
return self._properties['contact_sid']
@property
def contact_channel_address(self):
"""
:returns: The phone number, SIP address or Client identifier that triggered this Engagement.
:rtype: unicode
"""
return self._properties['contact_channel_address']
@property
def context(self):
"""
:returns: Flow state.
:rtype: dict
"""
return self._properties['context']
@property
def status(self):
"""
:returns: The Status of this Engagement
:rtype: EngagementInstance.Status
"""
return self._properties['status']
@property
def date_created(self):
"""
:returns: The date this Engagement was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date this Engagement was updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The URL of this resource.
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: Nested resource URLs.
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch a EngagementInstance
:returns: Fetched EngagementInstance
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the EngagementInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
@property
def steps(self):
"""
Access the steps
:returns: twilio.rest.studio.v1.flow.engagement.step.StepList
:rtype: twilio.rest.studio.v1.flow.engagement.step.StepList
"""
return self._proxy.steps
@property
def engagement_context(self):
"""
Access the engagement_context
:returns: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
:rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
"""
return self._proxy.engagement_context
    def __repr__(self):
        """
        Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        # Render the solution key/value pairs, e.g. "sid=XX flow_sid=YY".
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Studio.V1.EngagementInstance {}>'.format(context)
| [
"[email protected]"
] | |
97dc0dee0ef8ce0ada8c9102b035a98d5717adee | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /account_voucher/__manifest__.py | 34480401b13ad5043af7067acd03109289d910d1 | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
{
'name' : 'Sale & Purchase Vouchers',
'version' : '1.0',
'summary': 'Manage your debts and credits thanks to simple sale/purchase receipts',
'description': """
TODO
old description:
Invoicing & Payments by Accounting Voucher & Receipts
=====================================================
The specific and easy-to-use Invoicing system in ALWAFI allows you to keep track of your accounting, even when you are not an accountant. It provides an easy way to follow up on your vendors and customers.
You could use this simplified accounting in case you work with an (external) account to keep your books, and you still want to keep track of payments.
The Invoicing system includes receipts and vouchers (an easy way to keep track of sales and purchases). It also offers you an easy method of registering payments, without having to encode complete abstracts of account.
This module manages:
* Voucher Entry
* Voucher Receipt [Sales & Purchase]
* Voucher Payment [Customer & Vendors]
""",
'category': 'Accounting',
'sequence': 20,
'depends' : ['account'],
'demo' : [],
'data' : [
'security/ir.model.access.csv',
'views/account_voucher_views.xml',
'security/account_voucher_security.xml',
'data/account_voucher_data.xml',
],
'auto_install': False,
'installable': True,
}
| [
"[email protected]"
] | |
b56d4fe821cd8462bbda70acd89752b0fbce8a74 | 7c91f92d2d82e0d9fd85af09f9d18226c747f7fa | /rhoci/forms/test.py | bb9d6fe3cf671e23e1b037366251aa9886986d9a | [
"Apache-2.0"
] | permissive | bregman-arie/rhoci | 5488afe8d884cb72a3475eef68ebc54944b45453 | bae1f1d737a12ede50d263a6496faf2b698515b5 | refs/heads/master | 2023-02-25T10:53:01.642377 | 2022-12-10T14:37:40 | 2022-12-10T14:37:40 | 90,493,854 | 12 | 8 | Apache-2.0 | 2023-02-16T07:11:11 | 2017-05-06T22:06:20 | CSS | UTF-8 | Python | false | false | 1,117 | py | # Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from flask_wtf import FlaskForm
from wtforms import BooleanField
from wtforms import StringField
from wtforms import SubmitField
class TestSearch(FlaskForm):
    """Search form for filtering Jenkins test results.

    All text fields are optional free-text filters; ``skipped`` toggles
    whether skipped tests are included.  Field declaration order is
    preserved by WTForms when rendering.
    """
    class_name = StringField('class name')
    test_name = StringField('test name')
    status = StringField('status')
    failed_since = StringField('failed since')
    skipped_message = StringField('skipped message')
    stdout = StringField('stdout')
    stderr = StringField('stderr')
    skipped = BooleanField()
    submit = SubmitField('Search')
| [
"[email protected]"
] | |
0fa2b8c8ec819233bc34543f46cd4fd13fe8509b | 7d75018c3d8e2ac85ea0f5bbaf52ce5eae9761ca | /project/gohelp/settings.py | 3bfa30ab59e9abf68240589b9a17501126484713 | [] | no_license | AVS18/sdp-sem5 | fff484331d9b588558b928e557a974f05652adcb | 238dcc7dfe50dda9678383590a43b23bbcd99553 | refs/heads/main | 2023-01-14T01:01:18.297711 | 2020-11-14T13:43:55 | 2020-11-14T13:43:55 | 288,098,284 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,850 | py | """
Django settings for gohelp project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- load it from the
# environment (e.g. os.environ) before any real deployment.
SECRET_KEY = '6-rp4=_omlx$ya3@dms@a8jnpamp#$dl^y(bx!0ptji47ag!qk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'base',
'worker',
'customer',
'storages'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gohelp.urls'
AUTH_USER_MODEL = 'base.User'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gohelp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): credentials are hard-coded; the PASSWORD (and ideally the
# whole config) should come from environment variables or a secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'gohelp',
        'USER': 'postgres',
        'PASSWORD': 'kamakshi@1234',
        'HOST': 'localhost'
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
import os
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
# Amazon S3 media storage (django-storages) configuration.
# SECURITY WARNING(review): real AWS keys and the Gmail password must never
# be committed to source control; load these from environment variables.
AWS_ACCESS_KEY_ID = 'replace the credentials'
AWS_SECRET_ACCESS_KEY = "replace the credentials"
AWS_STORAGE_BUCKET_NAME = "gohelp"
AWS_S3_REGION_NAME = 'ap-south-1'
AWS_S3_FILE_OVERWRITE = False
AWS_DEFAULT_ACL = None
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# Outgoing e-mail via Gmail SMTP (TLS on port 587).
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER='[email protected]'
EMAIL_HOST_PASSWORD='aditya12345'
EMAIL_USE_TLS = True
| [
"[email protected]"
] | |
4443aa6863038875ca5ad3372f122475c4993118 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2015_04_01/aio/_monitor_management_client.py | 5640ee566505865cb91ec42008e9408f5e7a74d8 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 5,526 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import MonitorManagementClientConfiguration
from .operations import (
ActivityLogsOperations,
AlertRulesOperations,
AutoscaleSettingsOperations,
EventCategoriesOperations,
Operations,
TenantActivityLogsOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class MonitorManagementClient:  # pylint: disable=client-accepts-api-version-keyword
    """Monitor Management Client.

    :ivar activity_logs: ActivityLogsOperations operations
    :vartype activity_logs: azure.mgmt.monitor.v2015_04_01.aio.operations.ActivityLogsOperations
    :ivar autoscale_settings: AutoscaleSettingsOperations operations
    :vartype autoscale_settings:
     azure.mgmt.monitor.v2015_04_01.aio.operations.AutoscaleSettingsOperations
    :ivar event_categories: EventCategoriesOperations operations
    :vartype event_categories:
     azure.mgmt.monitor.v2015_04_01.aio.operations.EventCategoriesOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.monitor.v2015_04_01.aio.operations.Operations
    :ivar tenant_activity_logs: TenantActivityLogsOperations operations
    :vartype tenant_activity_logs:
     azure.mgmt.monitor.v2015_04_01.aio.operations.TenantActivityLogsOperations
    :ivar alert_rules: AlertRulesOperations operations
    :vartype alert_rules: azure.mgmt.monitor.v2015_04_01.aio.operations.AlertRulesOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = MonitorManagementClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build (de)serializers over every generated model type; client-side
        # validation is disabled so the service performs all validation.
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False

        # Wire each per-resource operation group onto the shared pipeline.
        self.activity_logs = ActivityLogsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.autoscale_settings = AutoscaleSettingsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.event_categories = EventCategoriesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.tenant_activity_logs = TenantActivityLogsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.alert_rules = AlertRulesOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy so the caller's request object is never mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        """Close the underlying pipeline/transport."""
        await self._client.close()

    async def __aenter__(self) -> "MonitorManagementClient":
        """Async context-manager entry: opens the underlying pipeline."""
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details: Any) -> None:
        """Async context-manager exit: closes the underlying pipeline."""
        await self._client.__aexit__(*exc_details)
| [
"[email protected]"
] | |
712dba93a2621c8f100b375020d6fe1a26b33587 | 155cfef4bb35d20bc574f63f3443039bfcc1ab7e | /srcs/mahjong/admin/admin/admin.py | dae8ebe48a3a7b3d835c15ac939a653d4622e62b | [] | no_license | rolllyroman/fish_web | 3116481a6a16484283f428eb7c98ecea7cee99d4 | eb5a05ea3d56c7d9f599009e8ab6f4cb322e9023 | refs/heads/master | 2020-03-28T01:08:57.346228 | 2018-09-06T03:34:05 | 2018-09-06T03:34:05 | 147,480,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | #-*- coding:utf-8 -*-
#!/usr/bin/python
"""
Author:$Author$
Date:$Date$
Revision:$Revision$
Description:
后台APP应用入口
"""
from bottle import Bottle
from common.install_plugin import install_redis_plugin,install_session_plugin
admin_app = Bottle()
install_redis_plugin(admin_app)
install_session_plugin(admin_app)
import admin_index
import admin_auth
#会员模块
import admin_member
# 数据统计模块
import admin_statistics
# 个人信息模块
import admin_self
# 代理模块
import admin_agent
# 用户权限模块
import admin_power
#游戏模块
import admin_game
#订单模块
import admin_order
#商品模块
import admin_goods
#系统设置
import admin_setting
#消息设置
import admin_notic
#捕鱼模块
import admin_fish
#福利模块
import admin_benefit
'''
金币场模块
'''
import admin_gold
| [
"[email protected]"
] | |
d34c789dde64b5b39999009db01b1063b4be7c34 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4458015.3.spec | e9ca614706903a6b3dabcb4a519425f93b2f6d24 | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,311 | spec | {
"id": "mgm4458015.3",
"metadata": {
"mgm4458015.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 862581,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 311,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 33579,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 306,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 450,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1675,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 532230,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 392,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 311,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 23712,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 1217940,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 49,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 122822,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 46050,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 26663,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 277435,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 6684,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 5341,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 11391,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 15601,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 12182568,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 98,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 376,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 28,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 800,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 1298,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 422,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 193,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22871,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 4675,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458015.3/file/999.done.species.stats"
}
},
"id": "mgm4458015.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4458015.3"
}
},
"raw": {
"mgm4458015.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4458015.3"
}
}
} | [
"[email protected]"
] | |
0565aa6f020b9a0cec1aadb20a7b89e216fe928b | f0d0ea29240c53b6ce1c4b06095b528ece02fdd7 | /core/championship.py | d714983cb55b99879f051866efec7695e0065120 | [] | no_license | zhifuliu/dianjing | 477529ccd6159329e1bc121aeb2ff328ee499f4a | 7b3f6d58f5bc0738651d8d72c9a24df4ade0ed36 | refs/heads/master | 2020-03-21T09:10:28.343268 | 2017-03-24T03:06:24 | 2017-03-24T03:06:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,317 | py | # -*- coding: utf-8 -*-
"""
Author: Wang Chao <[email protected]>
Filename: championship
Date Created: 2016-12-09 15:13
Description:
"""
import random
import arrow
import itertools
import requests
from django.conf import settings
from dianjing.exception import GameException
from core.mongo import (
MongoChampionship,
MongoChampionshipFormationWay1,
MongoChampionshipFormationWay2,
MongoChampionshipFormationWay3,
MongoChampionshipGroup,
MongoChampionshipLevel,
MongoChampionHistory,
MongoCharacter,
)
from core.plunder import PlunderFormation, Plunder, is_npc
from core.vip import VIP
from core.club import Club, get_club_property
from core.mail import MailManager
from core.resource import ResourceClassification
from core.match import ClubMatch, MatchRecord
from core.winning import WinningChampionship
from utils.message import MessagePipe, MessageFactory
from utils.functional import make_string_id, make_time_of_today, get_start_time_of_today
from utils.operation_log import OperationLog
from config import (
GlobalConfig,
ConfigErrorMessage,
ConfigChampionBet,
ConfigChampionRankReward,
ConfigChampionScoreReward,
ConfigChampionWinScore,
ConfigPlunderNPC,
ConfigNPCFormation,
)
from protomsg.common_pb2 import ACT_INIT, ACT_UPDATE
from protomsg.championship_pb2 import (
CHAMPION_LEVEL_1,
CHAMPION_LEVEL_2,
CHAMPION_LEVEL_4,
CHAMPION_LEVEL_8,
CHAMPION_LEVEL_16,
ChampionFormationNotify,
ChampionGroupNotify,
ChampionLevelNotify,
ChampionNotify,
ChampionClub as MsgChampionClub,
)
from protomsg.match_pb2 import ClubMatchServerSideRequest, ClubMatchServerSideResponse
from protomsg.plunder_pb2 import PlunderFormation as MsgPlunderFormation
from protomsg.formation_pb2 import FORMATION_SLOT_USE
from protomsg.leaderboard_pb2 import LeaderboardChampionshipNotify
# Promotion order of the knockout ("top N") levels: 16 -> 8 -> 4 -> 2 -> 1 (champion).
LEVEL_SEQ = [16, 8, 4, 2, 1]

# Maps a knockout level to the next (smaller) level it promotes into.
LEVEL_NEXT_TABLE = {
    16: 8,
    8: 4,
    4: 2,
    2: 1,
}

# Reverse lookup: which level a club was promoted from.
LEVEL_PREVIOUS_TABLE = {v: k for k, v in LEVEL_NEXT_TABLE.iteritems()}

# Group-stage match times, as [hour, minute] in server-local time.
GROUP_MATCH_TIME = [
    [14, 0],
    [15, 0],
    [16, 0],
    [17, 0],
    [18, 0],
    [19, 0],
]
# GROUP_MATCH_TIME = [
#     [16, 5],
#     [16, 7],
#     [16, 9],
#     [16, 11],
#     [16, 13],
#     [16, 15],
# ]

# Knockout-level match times: level -> [hour, minute], server-local time.
LEVEL_MATCH_TIMES_TO_HOUR_MINUTE_TABLE = {
    16: [19, 30],
    8: [20, 0],
    4: [20, 30],
    2: [21, 0],
}

# LEVEL_MATCH_TIMES_TO_HOUR_MINUTE_TABLE = {
#     16: [16, 20],
#     8: [16, 25],
#     4: [16, 30],
#     2: [16, 35],
# }

# No formation changes or betting are allowed within this many minutes
# before a match starts.
MINUTES_LIMIT_FOR_FORMATION_AND_BET = 10

# [[(hour, minute), (hour, minute)] ...]
# Each element is a pair of (h, m) tuples; times falling between the two
# are forbidden for formation changes and betting.
TIME_LIMIT = []
for __h, __m in itertools.chain(GROUP_MATCH_TIME, LEVEL_MATCH_TIMES_TO_HOUR_MINUTE_TABLE.values()):
    __m1 = __m - MINUTES_LIMIT_FOR_FORMATION_AND_BET
    if __m1 < 0:
        # The lock window starts in the previous hour
        # (e.g. a 14:00 match locks from 13:50).
        __m1 += 60
        __h1 = __h - 1
        assert __h1 >= 0
    else:
        __h1 = __h
    TIME_LIMIT.append(((__h1, __m1), (__h, __m)))

# Matches actually kick off this many minutes ahead of the scheduled time.
MATCH_AHEAD_MINUTE = 1

# Weekdays on which signing up (applying) is allowed. 0 = Monday.
APPLY_WEEKDAY = [
    # 0, # Monday
    1, # Tuesday
    # 2, # Wednesday
    3, # Thursday
    # 4, # Friday
    5, # Saturday
    # 6, # Sunday
]

# Daily apply time window, as (hour, minute) start / end pairs.
APPLY_TIME_RANGE = [(8, 0), (13, 30)]

MATCH_SERVER_REQ_HEADERS = {'NMVC_APIRequest': 'StartCombat'}

AUTO_APPLY_VIP_LEVEL = GlobalConfig.value("CHAMPIONSHIP_AUTO_APPLY_VIP_LEVEL")
APPLY_CLUB_LEVEL_LIMIT = GlobalConfig.value("CHAMPIONSHIP_APPLY_LEVEL_LIMIT")
def find_level_match_at(lv):
    """Return the time at which the match producing knockout level ``lv`` ran.

    Walks backwards from today to the most recent allowed apply weekday,
    then applies the scheduled hour/minute of the previous knockout level.

    :raises RuntimeError: if no apply weekday is found within a week.
    """
    today = get_start_time_of_today()
    weekday = today.weekday()

    # Step back one day at a time (at most a full week) until we land on
    # an apply weekday.
    for shift in range(7):
        if weekday in APPLY_WEEKDAY:
            break
        weekday = (weekday - 1) % 7
    else:
        raise RuntimeError("ChampionshipLevel find match at error!")

    match_day = today.replace(days=-shift)
    hour, minute = LEVEL_MATCH_TIMES_TO_HOUR_MINUTE_TABLE[LEVEL_PREVIOUS_TABLE[lv]]
    return match_day.replace(hour=hour, minute=minute)
def make_pairs_from_flat_list(items):
    """Group a flat sequence into consecutive 2-tuples.

    e.g. [a, b, c, d] -> [(a, b), (c, d)].
    A trailing element without a partner is silently dropped.
    """
    return [(items[i], items[i + 1]) for i in range(0, len(items) - 1, 2)]
def check_club_level(silence=True):
    """Decorator factory gating Championship methods on the club level.

    If the club is below APPLY_CLUB_LEVEL_LIMIT the wrapped method either
    returns None silently (``silence=True``) or raises GameException with
    CLUB_LEVEL_NOT_ENOUGH.
    """
    def deco(fun):
        def wrap(self, *args, **kwargs):
            # ``self`` is a Championship instance.
            if self.club_level >= APPLY_CLUB_LEVEL_LIMIT:
                return fun(self, *args, **kwargs)

            if not silence:
                raise GameException(ConfigErrorMessage.get_error_id("CLUB_LEVEL_NOT_ENOUGH"))
            # Silent mode: swallow the call and return None.

        return wrap

    return deco
def check_time_limit(fun):
    """Decorator rejecting formation changes / bets shortly before a match.

    TIME_LIMIT holds ``((h1, m1), (h2, m2))`` windows; any call whose current
    server-local time falls inside ``[h1:m1, h2:m2)`` raises GameException
    with CHAMPIONSHIP_FORMATION_FORBIDDEN.

    Bug fix: the previous check compared hours and minutes independently
    (``h1 <= h <= h2 and m1 <= m < m2``), which can never match a window
    that crosses an hour boundary -- e.g. 13:50 -> 14:00 yields
    ``50 <= minute < 0``, always False -- so the lock before every
    on-the-hour match (14:00..19:00, 20:00, 21:00) was silently ignored.
    Comparing minutes-of-day handles both same-hour and hour-crossing
    windows.  (Windows never cross midnight; see the TIME_LIMIT builder.)
    """
    def wrap(self, *args, **kwargs):
        now = arrow.utcnow().to(settings.TIME_ZONE)
        now_minutes = now.hour * 60 + now.minute

        for (_h1, _m1), (_h2, _m2) in TIME_LIMIT:
            start = _h1 * 60 + _m1
            end = _h2 * 60 + _m2
            if start <= now_minutes < end:
                raise GameException(ConfigErrorMessage.get_error_id("CHAMPIONSHIP_FORMATION_FORBIDDEN"))

        return fun(self, *args, **kwargs)

    return wrap
class ChampionshipFormationWay1(PlunderFormation):
    # Formation for championship "way" 1; persisted in its own collection.
    __slots__ = []
    MONGO_COLLECTION = MongoChampionshipFormationWay1


class ChampionshipFormationWay2(PlunderFormation):
    # Formation for championship "way" 2.
    __slots__ = []
    MONGO_COLLECTION = MongoChampionshipFormationWay2


class ChampionshipFormationWay3(PlunderFormation):
    # Formation for championship "way" 3.
    __slots__ = []
    MONGO_COLLECTION = MongoChampionshipFormationWay3


# way id (1-3) -> formation class.
WAY_MAP = {
    1: ChampionshipFormationWay1,
    2: ChampionshipFormationWay2,
    3: ChampionshipFormationWay3,
}
# Clean up leftovers from the previous season before sign-up opens.
def before_apply(server_id):
    """Reset per-season state and push fresh notify messages.

    Drops the knockout-level document and clears every club's bet records,
    then sends the shared basic notify followed by the level notify to all
    recently active characters (messages are packed once and fanned out).
    """
    MongoChampionshipLevel.db(server_id).drop()

    MongoChampionship.db(server_id).update_many(
        {},
        {'$set': {
            'bet': {},
            'has_bet': False
        }}
    )

    basic_notify = make_common_basic_notify_msg(server_id)
    basic_data = MessageFactory.pack(basic_notify)

    level_notify = ChampionshipLevel(server_id).make_protomsg()
    level_data = MessageFactory.pack(level_notify)

    char_ids = OperationLog.get_recent_action_char_ids(server_id)
    for cid in char_ids:
        mp = MessagePipe(cid)
        # basic data is written before level data for each character.
        mp.put(data=basic_data)
        mp.put(data=level_data)
# Fetch last season's top-N clubs.
def get_history_top_clubs(server_id):
    """Return the historical top clubs as ``(club_id, name, flag)`` tuples.

    Reads the persisted champion-history document; returns an empty list
    when no history exists yet.
    """
    doc = MongoChampionHistory.db(server_id).find_one(
        {'_id': MongoChampionHistory.DOC_ID}
    )

    if doc is None:
        return []

    info = doc['info']
    return [(member_id, info[member_id]['name'], info[member_id]['flag'])
            for member_id in doc['member_ids']]
# Shared part of ChampionNotify; per-player `applied` / bet details are
# filled in by each character individually.
def make_common_basic_notify_msg(server_id):
    """Build the ChampionNotify message common to every player.

    Adds one empty bet entry per knockout level (level only, no bet data)
    and appends the historical top clubs.
    """
    msg = ChampionNotify()
    msg.applied = False

    # One bet slot per knockout level; no bet info at this point.
    for level in LEVEL_SEQ:
        msg.bet.add().level = level

    for club_id, club_name, club_flag in get_history_top_clubs(server_id):
        entry = msg.top_clubs.add()
        entry.id = club_id
        entry.name = club_name
        entry.flag = club_flag

    return msg
# An empty group notify: the player is not in any group.
def make_empty_group_notify_msg():
    """Build a ChampionGroupNotify with zeroed score and rank."""
    msg = ChampionGroupNotify()
    msg.my_rank = 0
    msg.my_score = 0
    return msg
# Fully reset the championship state.
# NOTE: this helper is currently unused.
def totally_reset(server_id, send_notify=False):
    """Wipe all championship state: apply flags, bets, groups and levels.

    When ``send_notify`` is True, also pushes refreshed basic / group /
    level notify messages to every recently active character.
    """
    MongoChampionship.db(server_id).update_many(
        {},
        {'$set': {
            'applied': False,
            'bet': {},
            'has_bet': False,
        }}
    )

    MongoChampionshipGroup.db(server_id).drop()
    MongoChampionshipLevel.db(server_id).drop()

    if send_notify:
        # Pack each notify once and fan the raw bytes out to every pipe.
        basic_notify = make_common_basic_notify_msg(server_id)
        basic_data = MessageFactory.pack(basic_notify)

        group_notify = make_empty_group_notify_msg()
        group_data = MessageFactory.pack(group_notify)

        level_notify = ChampionshipLevel(server_id).make_protomsg()
        level_data = MessageFactory.pack(level_notify)

        char_ids = OperationLog.get_recent_action_char_ids(server_id)
        for cid in char_ids:
            mp = MessagePipe(cid)
            mp.put(data=basic_data)
            mp.put(data=group_data)
            mp.put(data=level_data)
def make_plunder_formation_msg(club, way_id):
    """Serialize one way's formation into a MsgPlunderFormation.

    Slot ids are 1-based positions in ``club.formation_staffs``; the
    message power is the sum of all staff powers.

    :type club: core.abstract.AbstractClub
    :type way_id: int
    """
    msg = MsgPlunderFormation()
    msg.way = way_id

    total_power = 0
    for slot_index, staff in enumerate(club.formation_staffs, start=1):
        total_power += staff.power

        slot = msg.formation.add()
        slot.slot_id = slot_index
        slot.status = FORMATION_SLOT_USE
        slot.staff_id = staff.id
        slot.unit_id = staff.unit.id
        slot.position = staff.formation_position
        slot.staff_oid = staff.oid
        slot.policy = 1

    msg.power = total_power
    return msg
class Match(object):
    """A championship match between two clubs, fought over three ways.

    Each side may be a real player (char id) or an NPC id; *info_one* /
    *info_two* carry the display info ('name', 'flag', and for NPCs
    'ways_npc').
    """
    __slots__ = ['server_id', 'id_one', 'info_one', 'id_two', 'info_two']
    def __init__(self, server_id, id_one, info_one, id_two, info_two):
        self.server_id = server_id
        self.id_one = id_one
        self.info_one = info_one
        self.id_two = id_two
        self.info_two = info_two
    def make_3_way_clubs(self, _id, _info):
        """Build the three way-clubs and their skill sequences for one side.

        :rtype: list[core.abstract.AbstractClub]
        """
        clubs = []
        skill_sequences = []
        if is_npc(_id):
            for i in range(1, 4):
                # each way uses its own configured NPC formation
                npc_id = _info['ways_npc'][i - 1]
                club = ConfigNPCFormation.get(npc_id)
                club.id = _id
                club.name = _info['name']
                club.flag = _info['flag']
                clubs.append(club)
                # NPCs carry no skill sequence
                skill_sequences.append({})
        else:
            cs = Championship(self.server_id, int(_id))
            for i in range(1, 4):
                way = cs.get_way_object(i)
                club = Club(self.server_id, int(_id), load_staffs=False)
                club.formation_staffs = way.formation_staffs
                clubs.append(club)
                skill_sequences.append(way.get_skill_sequence())
        return clubs, skill_sequences
    def start(self):
        """Run all three ways on a remote match server.

        :return: (one_wins, record_ids) -- one_wins is a list of three
            0/1 flags from side one's perspective, record_ids the ids of
            the stored match records.
        """
        def one_way_match(_club_one, _club_two, _skill_sequence_one, _skill_sequence_two):
            # Fight a single way remotely; returns (win_flag,
            # serialized match msg, match record).
            _match = ClubMatch(_club_one, _club_two, 3, _skill_sequence_one, _skill_sequence_two)
            _msg = _match.start(auto_load_staffs=False, check_empty=False)
            _msg.key = ""
            _msg.map_name = GlobalConfig.value_string("MATCH_MAP_CHAMPIONSHIP")
            _req = ClubMatchServerSideRequest()
            _req.match.MergeFrom(_msg)
            _data = _req.SerializeToString()
            _res = requests.post(match_server_url, headers=MATCH_SERVER_REQ_HEADERS, data=_data)
            response = ClubMatchServerSideResponse()
            response.ParseFromString(_res.content)
            # any star for club one counts as a win for side one
            if response.star > 0:
                _win = 1
            else:
                _win = 0
            return _win, _msg.SerializeToString(), response.record
        # pick one match server at random for all three ways
        host, port = random.choice(settings.MATCH_SERVERS)
        match_server_url = 'http://{0}:{1}/'.format(host, port)
        one_clubs, one_skill_sequences = self.make_3_way_clubs(self.id_one, self.info_one)
        two_clubs, two_skill_sequences = self.make_3_way_clubs(self.id_two, self.info_two)
        # [one_wins, record_ids]
        one_wins = []
        info_sets = []
        for i in range(3):
            club_one = one_clubs[i]
            club_two = two_clubs[i]
            win, club_match, record = one_way_match(
                one_clubs[i], two_clubs[i],
                one_skill_sequences[i], two_skill_sequences[i]
            )
            one_wins.append(win)
            info_sets.append((club_one.id, club_two.id, club_match, record))
        # persist all three way-records in one batch
        record_ids = MatchRecord.batch_create(self.server_id, info_sets)
        return one_wins, record_ids
class Championship(object):
    """Per-player championship state: application, bets and 3-way formations."""
    __slots__ = ['server_id', 'char_id', 'doc', 'club_level']
    def __init__(self, server_id, char_id):
        self.server_id = server_id
        self.char_id = char_id
        self.doc = MongoChampionship.db(self.server_id).find_one({'_id': self.char_id})
        if not self.doc:
            # lazily create the per-player document on first access
            self.doc = MongoChampionship.document()
            self.doc['_id'] = self.char_id
            MongoChampionship.db(self.server_id).insert_one(self.doc)
        self.club_level = get_club_property(self.server_id, self.char_id, 'level')
    @check_club_level(silence=True)
    def try_initialize(self, send_notify=True):
        """One-time setup: seed championship formations from the plunder ones."""
        if self.doc['active']:
            return
        # copy from the plunder formation
        p = Plunder(self.server_id, self.char_id)
        for i in [1, 2, 3]:
            way = p.get_way_object(i)
            doc = way.get_or_create_doc()
            # replace any stale way document with the plunder copy
            WAY_MAP[i].MONGO_COLLECTION.db(self.server_id).delete_one({'_id': self.char_id})
            WAY_MAP[i].MONGO_COLLECTION.db(self.server_id).insert_one(doc)
        self.doc['active'] = True
        MongoChampionship.db(self.server_id).update_one(
            {'_id': self.char_id},
            {'$set': {
                'active': True
            }}
        )
        if send_notify:
            self.send_notify()
    def is_applied(self):
        """True if applied explicitly, or auto-applied by high VIP level."""
        # VIP players are applied automatically
        if self.doc['applied']:
            return True
        if self.club_level < APPLY_CLUB_LEVEL_LIMIT:
            return False
        if VIP(self.server_id, self.char_id).level < AUTO_APPLY_VIP_LEVEL:
            return False
        return True
    @check_club_level(silence=False)
    def apply_in(self):
        """Apply for the championship; only valid inside the apply window."""
        now = arrow.utcnow().to(settings.TIME_ZONE)
        if now.weekday() not in APPLY_WEEKDAY:
            raise GameException(ConfigErrorMessage.get_error_id("CHAMPIONSHIP_APPLY_NOT_OPEN"))
        # today's configured open/close times
        range_start = make_time_of_today(APPLY_TIME_RANGE[0][0], APPLY_TIME_RANGE[0][1])
        range_end = make_time_of_today(APPLY_TIME_RANGE[1][0], APPLY_TIME_RANGE[1][1])
        if now < range_start or now >= range_end:
            raise GameException(ConfigErrorMessage.get_error_id("CHAMPIONSHIP_APPLY_NOT_OPEN"))
        if self.is_applied():
            raise GameException(ConfigErrorMessage.get_error_id("CHAMPIONSHIP_ALREADY_APPLIED"))
        self.doc['applied'] = True
        MongoChampionship.db(self.server_id).update_one(
            {'_id': self.char_id},
            {'$set': {
                'applied': True
            }}
        )
        self.send_basic_notify()
    @check_time_limit
    @check_club_level(silence=False)
    def bet(self, club_id, bet_id):
        """Place one bet on *club_id* at the current knockout level."""
        cl = ChampionshipLevel(self.server_id)
        lv = cl.get_current_level()
        # level 1 means the championship is decided -- nothing to bet on
        if lv == 1:
            raise GameException(ConfigErrorMessage.get_error_id("INVALID_OPERATE"))
        # only one bet per level
        if str(lv) in self.doc['bet']:
            raise GameException(ConfigErrorMessage.get_error_id("CHAMPIONSHIP_ALREADY_BET"))
        # target must actually be competing at this level
        if club_id not in cl.doc['levels'].get(str(lv), {}).get('member_ids', []):
            raise GameException(ConfigErrorMessage.get_error_id("INVALID_OPERATE"))
        config = ConfigChampionBet.get(bet_id)
        if not config:
            raise GameException(ConfigErrorMessage.get_error_id("INVALID_OPERATE"))
        if config.level != lv:
            raise GameException(ConfigErrorMessage.get_error_id("INVALID_OPERATE"))
        # charge the bet cost up front
        rc = ResourceClassification.classify(config.cost)
        rc.check_exist(self.server_id, self.char_id)
        rc.remove(self.server_id, self.char_id, message="Champion.bet:{0}".format(bet_id))
        bet_info = {
            'club_id': club_id,
            'bet_id': bet_id
        }
        self.doc['bet'][str(lv)] = bet_info
        self.doc['has_bet'] = True
        MongoChampionship.db(self.server_id).update_one(
            {'_id': self.char_id},
            {'$set': {
                'bet.{0}'.format(lv): bet_info,
                'has_bet': True,
            }}
        )
        self.send_basic_notify()
    def get_way_object(self, way_id):
        """Return the formation object for way 1..3.

        :rtype: PlunderFormation
        """
        try:
            way_class = WAY_MAP[way_id]
        except KeyError:
            raise GameException(ConfigErrorMessage.get_error_id("INVALID_OPERATE"))
        return way_class(self.server_id, self.char_id, way_id)
    def find_way_id_by_staff_id(self, staff_id):
        # 0 means the staff is in no way's formation
        for i in [1, 2, 3]:
            if self.get_way_object(i).is_staff_in_formation(staff_id):
                return i
        return 0
    def find_way_id_by_unit_id(self, unit_id):
        # 0 means the unit is in no way's formation
        for i in [1, 2, 3]:
            if self.get_way_object(i).is_unit_in_formation(unit_id):
                return i
        return 0
    @check_time_limit
    @check_club_level(silence=False)
    def set_staff(self, way_id, slot_id, staff_id):
        """Put *staff_id* into a slot, removing it from the other two ways first."""
        way_list = [1, 2, 3]
        if way_id not in way_list:
            raise GameException(ConfigErrorMessage.get_error_id("INVALID_OPERATE"))
        if slot_id not in [1, 2, 3]:
            raise GameException(ConfigErrorMessage.get_error_id("INVALID_OPERATE"))
        # a staff may only appear in one way at a time
        way_list.remove(way_id)
        for i in way_list:
            w = self.get_way_object(i)
            w.try_unset_staff(staff_id)
        w = self.get_way_object(way_id)
        w.set_staff(slot_id, staff_id)
        self.send_formation_notify()
    @check_time_limit
    @check_club_level(silence=False)
    def set_unit(self, way_id, slot_id, unit_id):
        """Assign a unit to one slot of one way."""
        if slot_id not in [1, 2, 3]:
            raise GameException(ConfigErrorMessage.get_error_id("INVALID_OPERATE"))
        w = self.get_way_object(way_id)
        w.set_unit(slot_id, unit_id)
        self.send_formation_notify()
    @check_time_limit
    @check_club_level(silence=False)
    def set_position(self, way_id, formation_slots):
        """Sync the slot/position layout of one way from the client."""
        my_way = self.get_way_object(way_id)
        my_way.sync_slots(formation_slots)
        self.send_formation_notify()
    @check_time_limit
    @check_club_level(silence=False)
    def skill_sequence_set_staff(self, way_id, seq_id, index, staff_id):
        """Set one staff into a skill-sequence slot of one way."""
        w = self.get_way_object(way_id)
        w.skill_sequence_set_staff(seq_id, index, staff_id)
        self.send_formation_notify()
    @check_club_level(silence=False)
    def sync_group(self):
        """Push this player's current group-stage view to the client."""
        cg = ChampionshipGroup(self.server_id)
        cg.find_by_char_id(self.char_id)
        group_msg = cg.make_protomsg()
        MessagePipe(self.char_id).put(msg=group_msg)
    @check_club_level(silence=False)
    def sync_level(self):
        """Push only the current knockout level to the client."""
        cl = ChampionshipLevel(self.server_id)
        current_lv = cl.doc['current_level']
        level_msg = cl.make_protomsg(level=current_lv)
        MessagePipe(self.char_id).put(msg=level_msg)
    @check_club_level(silence=True)
    def send_notify(self):
        """Full resync: basic info, formations, group stage and knockout bracket."""
        self.send_basic_notify()
        self.send_formation_notify()
        cg = ChampionshipGroup(self.server_id)
        cg.find_by_char_id(self.char_id)
        group_msg = cg.make_protomsg()
        MessagePipe(self.char_id).put(msg=group_msg)
        cl = ChampionshipLevel(self.server_id)
        level_msg = cl.make_protomsg()
        MessagePipe(self.char_id).put(msg=level_msg)
    def send_basic_notify(self, basic_notify=None):
        """Send the basic notify, overlaying this player's applied/bet state.

        :param basic_notify: optional pre-built common message, so bulk
            senders can build it once and reuse it per player
        """
        if not basic_notify:
            basic_notify = make_common_basic_notify_msg(self.server_id)
        basic_notify.applied = self.is_applied()
        for bet in basic_notify.bet:
            bet_info = self.doc['bet'].get(str(bet.level), {})
            if bet_info:
                bet.bet_for = bet_info['club_id']
                bet.bet_id = bet_info['bet_id']
        MessagePipe(self.char_id).put(msg=basic_notify)
    @check_club_level(silence=True)
    def send_formation_notify(self):
        """Send all three way formations to the client."""
        notify = ChampionFormationNotify()
        for i in [1, 2, 3]:
            notify_way = notify.formation.add()
            w = self.get_way_object(i)
            notify_way.MergeFrom(w.make_protobuf())
        MessagePipe(self.char_id).put(msg=notify)
class ChampionshipGroup(object):
    """One group (of eight) in the championship group stage.

    Wraps a MongoChampionshipGroup document holding the member list,
    per-member scores, per-member match logs, and how many group rounds
    have been played (``match_times``, 1-based; 7 means finished).
    """
    __slots__ = ['server_id', 'group_id', 'doc', '_char_id', '_member_ids', '_info']
    def __init__(self, server_id):
        self.server_id = server_id
        self.group_id = None
        self.doc = None
        # _char_id is only filled when find_by_char_id actually finds a group
        self._char_id = None
        # these two only hold data while a brand-new group is being built
        # (add_club/finish); they are not filled for queried groups
        self._member_ids = []
        self._info = {}
    def find_by_char_id(self, char_id):
        """Load the group containing *char_id*, if any."""
        self.doc = MongoChampionshipGroup.db(self.server_id).find_one(
            {'member_ids': str(char_id)}
        )
        if self.doc:
            self.group_id = self.doc['_id']
            self._char_id = char_id
    def find_by_group_id(self, group_id):
        """Load the group with the given id, if it exists."""
        self.doc = MongoChampionshipGroup.db(self.server_id).find_one(
            {'_id': group_id}
        )
        if self.doc:
            self.group_id = group_id
    @classmethod
    def new(cls, server_id):
        """Create an in-memory group with a fresh id (persisted by finish())."""
        obj = cls(server_id)
        obj.group_id = make_string_id()
        return obj
    def add_club(self, club_id, club_info):
        """Stage one club for this new group (persisted by finish())."""
        self._member_ids.append(club_id)
        self._info[club_id] = club_info
    def finish(self):
        """Persist the staged group with zeroed scores and empty logs."""
        doc = MongoChampionshipGroup.document()
        doc['_id'] = self.group_id
        doc['member_ids'] = self._member_ids
        doc['info'] = self._info
        doc['scores'] = {i: 0 for i in self._member_ids}
        doc['logs'] = {i: [] for i in self._member_ids}
        doc['match_times'] = 1
        MongoChampionshipGroup.db(self.server_id).insert_one(doc)
    def get_scores_sorted(self):
        """Return (club_id, score) pairs sorted by score, highest first."""
        if not self.doc:
            return []
        # sorted() materializes a new list, so this works on both Python 2
        # (items() is a list) and Python 3 (items() is a non-sortable view).
        return sorted(self.doc['scores'].items(), key=lambda item: item[1], reverse=True)
    def get_top_two(self):
        """Return the ids of the two highest-scoring members."""
        scores = self.get_scores_sorted()
        return [scores[0][0], scores[1][0]]
    def start_match(self):
        """Play one group round, pairing neighbours in the score table.

        :return: the updated (already incremented) match_times; 7 means
            all six rounds are done.
        """
        match_times = self.doc['match_times']
        if match_times == 7:
            # all six rounds already played -- nothing to do
            return match_times
        hour, minute = GROUP_MATCH_TIME[match_times - 1]
        match_at = make_time_of_today(hour, minute).timestamp
        scores = self.get_scores_sorted()
        pairs = make_pairs_from_flat_list(scores)
        for (id_one, _), (id_two, _) in pairs:
            info_one = self.doc['info'][id_one]
            info_two = self.doc['info'][id_two]
            m = Match(self.server_id, id_one, info_one, id_two, info_two)
            one_way_wins, record_ids = m.start()
            # side two's per-way results mirror side one's
            two_way_wins = [1 - _w for _w in one_way_wins]
            one_way_wins_count = len([_w for _w in one_way_wins if _w == 1])
            two_way_wins_count = len([_w for _w in two_way_wins if _w == 1])
            one_got_score = ConfigChampionWinScore.get(one_way_wins_count).score
            two_got_score = ConfigChampionWinScore.get(two_way_wins_count).score
            self.doc['scores'][id_one] += one_got_score
            self.doc['scores'][id_two] += two_got_score
            one_name = self.doc['info'][id_one]['name']
            two_name = self.doc['info'][id_two]['name']
            one_log = self.make_match_log(match_at, two_name, one_got_score, one_way_wins, record_ids)
            two_log = self.make_match_log(match_at, one_name, two_got_score, two_way_wins, record_ids)
            self.doc['logs'][id_one].append(one_log)
            self.doc['logs'][id_two].append(two_log)
            # score-threshold rewards are mailed as soon as they are reached
            self.send_score_reward_mail(id_one, self.doc['scores'][id_one])
            self.send_score_reward_mail(id_two, self.doc['scores'][id_two])
        self.doc['match_times'] += 1
        MongoChampionshipGroup.db(self.server_id).update_one(
            {'_id': self.group_id},
            {'$set': {
                'scores': self.doc['scores'],
                'logs': self.doc['logs'],
                'match_times': self.doc['match_times'],
            }}
        )
        return self.doc['match_times']
    def send_score_reward_mail(self, club_id, score):
        """Mail the configured score reward to a real player (NPCs skipped)."""
        if is_npc(club_id):
            return
        config = ConfigChampionScoreReward.get(score)
        if not config:
            return
        rc = ResourceClassification.classify(config.reward)
        attachment = rc.to_json()
        m = MailManager(self.server_id, int(club_id))
        m.add(config.mail_title, config.mail_content, attachment=attachment)
    @staticmethod
    def make_match_log(match_at, target_name, got_score, way_wins, record_ids):
        """Build one match-log entry for a member's history."""
        doc = MongoChampionshipGroup.document_match_log()
        doc['timestamp'] = match_at
        doc['target_name'] = target_name
        doc['got_score'] = got_score
        doc['way_wins'] = way_wins
        doc['record_ids'] = record_ids
        return doc
    def make_clubs_msg(self, scores=None):
        """Build MsgChampionClub entries for the top ten ranked members."""
        msgs = []
        if not scores:
            scores = self.get_scores_sorted()
        for index, (club_id, score) in enumerate(scores):
            rank = index + 1
            # only the top 10 are sent to clients.
            # (was `rank >= 10`, which cut the list off after 9 entries)
            if rank > 10:
                break
            msg = MsgChampionClub()
            msg.id = club_id
            msg.name = self.doc['info'][club_id]['name']
            msg.flag = self.doc['info'][club_id]['flag']
            msg.rank = rank
            msg.score = score
            msgs.append(msg)
        return msgs
    def make_protomsg(self):
        """Build the full ChampionGroupNotify for self._char_id's view."""
        if not self.doc:
            return make_empty_group_notify_msg()
        my_score = 0
        my_rank = 0
        scores = self.get_scores_sorted()
        for _index, (_id, _score) in enumerate(scores):
            if _id == str(self._char_id):
                my_score = _score
                my_rank = _index + 1
                break
        notify = ChampionGroupNotify()
        notify.my_score = my_score
        notify.my_rank = my_rank
        clubs = self.make_clubs_msg(scores=scores)
        for c in clubs:
            notify_club = notify.clubs.add()
            notify_club.MergeFrom(c)
        for log in self.doc['logs'][str(self._char_id)]:
            notify_log = notify.logs.add()
            notify_log.timestamp = log['timestamp']
            notify_log.target_name = log['target_name']
            notify_log.got_score = log['got_score']
            notify_log.way_wins.extend(log['way_wins'])
            notify_log.match_record_ids.extend(log['record_ids'])
        match_times = self.doc['match_times']
        if match_times > 6:
            # group stage over: no next match
            notify.next_match_at = 0
        else:
            hour, minute = GROUP_MATCH_TIME[match_times - 1]
            notify.next_match_at = make_time_of_today(hour, minute).timestamp
        # the next opponent is this player's neighbour in the score table
        pairs = make_pairs_from_flat_list(scores)
        for (id_one, _), (id_two, _) in pairs:
            if id_one == str(self._char_id):
                notify.next_target.id = id_two
                notify.next_target.name = self.doc['info'][id_two]['name']
                notify.next_target.flag = self.doc['info'][id_two]['flag']
            elif id_two == str(self._char_id):
                notify.next_target.id = id_one
                notify.next_target.name = self.doc['info'][id_one]['name']
                notify.next_target.flag = self.doc['info'][id_one]['flag']
        return notify
class ChampionshipGroupManager(object):
    """Stateless helpers operating on all groups of one server."""
    @classmethod
    def find_all_groups(cls, server_id):
        """Load every persisted group of this server.

        :rtype: list[ChampionshipGroup]
        """
        groups = []
        """:type: list[ChampionshipGroup]"""
        group_docs = MongoChampionshipGroup.db(server_id).find({})
        for doc in group_docs:
            g = ChampionshipGroup(server_id)
            g.group_id = doc['_id']
            g.doc = doc
            groups.append(g)
        return groups
    @classmethod
    def find_applied_clubs(cls, server_id):
        """Return char ids that applied explicitly or qualify via VIP auto-apply."""
        docs = MongoChampionship.db(server_id).find(
            {'applied': True},
            {'_id': 1}
        )
        club_ids = [doc['_id'] for doc in docs]
        club_ids = set(club_ids)
        # high-VIP players are auto-applied once their club level
        # reaches the entry limit
        vip_ids = VIP.query_char_ids(server_id, min_level=AUTO_APPLY_VIP_LEVEL)
        if vip_ids:
            club_docs = MongoCharacter.db(server_id).find(
                {'_id': {'$in': vip_ids}},
                {'level': 1}
            )
            for doc in club_docs:
                if doc['level'] >= APPLY_CLUB_LEVEL_LIMIT:
                    club_ids.add(doc['_id'])
        return list(club_ids)
    @classmethod
    def assign_to_groups(cls, server_id, club_ids):
        """Pad with NPCs to at least 32 / an even count, shuffle, and deal
        the participants round-robin into eight new groups."""
        club_amount = len(club_ids)
        if club_amount < 32:
            need_npc_amount = 32 - club_amount
        else:
            if club_amount % 2 == 0:
                need_npc_amount = 0
            else:
                need_npc_amount = 1
        info = {}
        if club_ids:
            club_docs = MongoCharacter.db(server_id).find(
                {'_id': {'$in': club_ids}},
                {'name': 1, 'flag': 1}
            )
            club_info = {doc['_id']: doc for doc in club_docs}
            for i in club_ids:
                info[str(i)] = {
                    'name': club_info[i]['name'],
                    'flag': club_info[i]['flag'],
                }
        for i in range(need_npc_amount):
            npc_doc = ConfigPlunderNPC.get(2).to_simple_doc()
            npc_id = npc_doc.pop('id')
            info[npc_id] = npc_doc
        # list() so shuffle() and pop(0) work on Python 3 as well,
        # where dict.keys() returns a non-indexable view
        ids = list(info.keys())
        random.shuffle(ids)
        # deal the shuffled ids round-robin into 8 groups
        groups = []
        """:type: list[ChampionshipGroup]"""
        for i in range(8):
            g = ChampionshipGroup.new(server_id)
            groups.append(g)
        g_index = 0
        while True:
            try:
                _id = ids.pop(0)
            except IndexError:
                break
            groups[g_index].add_club(_id, info[_id])
            g_index += 1
            if g_index >= 8:
                g_index = 0
        for g in groups:
            g.finish()
        # push each recently active player their (possibly empty) group view
        char_ids = OperationLog.get_recent_action_char_ids(server_id)
        for cid in char_ids:
            g = ChampionshipGroup(server_id)
            g.find_by_char_id(cid)
            msg = g.make_protomsg()
            MessagePipe(cid).put(msg=msg)
    @classmethod
    def start_match(cls, server_id):
        """Advance every group one round.

        :return: number of rounds completed so far (0 when no groups exist).
            When the sixth round finishes, the knockout bracket is built
            and broadcast.
        """
        groups = cls.find_all_groups(server_id)
        if not groups:
            return 0
        match_times = 0
        for g in groups:
            match_times = g.start_match()
        if match_times == 7:
            # group stage finished -- build the top-16 knockout bracket.
            # the drop is not strictly necessary, but just in case.
            MongoChampionshipLevel.db(server_id).drop()
            cl = ChampionshipLevel(server_id)
            cl.initialize()
            level_notify = cl.make_protomsg()
            level_data = MessageFactory.pack(level_notify)
            char_ids = OperationLog.get_recent_action_char_ids(server_id)
            for cid in char_ids:
                MessagePipe(cid).put(data=level_data)
        return match_times - 1
class ChampionshipLevel(object):
    """Knockout stage of the championship: 16 -> 8 -> 4 -> 2 -> 1 (champion).

    One singleton document per server; ``levels`` maps the level size
    ('16', '8', ...) to its members and results.
    """
    __slots__ = ['server_id', 'doc']
    def __init__(self, server_id):
        self.server_id = server_id
        self.doc = MongoChampionshipLevel.db(self.server_id).find_one(
            {'_id': MongoChampionshipLevel.DOC_ID}
        )
        if not self.doc:
            # lazily create the singleton document
            self.doc = MongoChampionshipLevel.document()
            MongoChampionshipLevel.db(self.server_id).insert_one(self.doc)
    def initialize(self):
        """Build the top-16 bracket from the group-stage results."""
        groups = ChampionshipGroupManager.find_all_groups(self.server_id)
        info = {}
        tops = []
        way_wins = {}
        record_ids = {}
        for g in groups:
            id_one, id_two = g.get_top_two()
            info[id_one] = g.doc['info'][id_one]
            info[id_two] = g.doc['info'][id_two]
            tops.append((id_one, id_two))
            # seed each qualifier with its last group-stage result
            way_wins[id_one] = g.doc['logs'][id_one][-1]['way_wins']
            record_ids[id_one] = g.doc['logs'][id_one][-1]['record_ids']
            way_wins[id_two] = g.doc['logs'][id_two][-1]['way_wins']
            record_ids[id_two] = g.doc['logs'][id_two][-1]['record_ids']
        # winners of groups 1-4 vs runners-up of groups 5-8,
        # runners-up of groups 1-4 vs winners of groups 5-8
        member_ids = []
        for i in range(4):
            member_ids.append(tops[i][0])
            member_ids.append(tops[i + 4][1])
        for i in range(4):
            member_ids.append(tops[i][1])
            member_ids.append(tops[i + 4][0])
        self.doc['info'] = info
        self.save(16, member_ids, way_wins, record_ids, info=info)
    def get_current_level(self):
        # current level size: 16/8/4/2, or 1 once the champion is decided
        return self.doc['current_level']
    def save(self, level, member_ids, way_wins, record_ids, info=None):
        """Persist one level's members/results and mail the rank rewards.

        :param info: only passed when (re)initializing, to store the
            display info of all qualifiers alongside the bracket
        """
        level_doc = MongoChampionshipLevel.document_level()
        level_doc['member_ids'] = member_ids
        level_doc['way_wins'] = way_wins
        level_doc['record_ids'] = record_ids
        self.doc['levels'][str(level)] = level_doc
        self.doc['current_level'] = level
        updater = {
            'levels.{0}'.format(level): level_doc,
            'current_level': level,
        }
        if info:
            updater['info'] = info
        MongoChampionshipLevel.db(self.server_id).update_one(
            {'_id': MongoChampionshipLevel.DOC_ID},
            {'$set': updater}
        )
        self.send_rank_reward_mail(level)
    def send_rank_reward_mail(self, level):
        """Mail the reward for reaching *level* to every real member."""
        config = ConfigChampionRankReward.get(level)
        member_ids = self.doc['levels'][str(level)]['member_ids']
        rc = ResourceClassification.classify(config.reward)
        attachment = rc.to_json()
        for m in member_ids:
            if is_npc(m):
                continue
            m = MailManager(self.server_id, int(m))
            m.add(config.mail_title, config.mail_content, attachment=attachment)
    def send_bet_reward_mail(self, level, win_ids):
        """Settle all bets placed on *level*: mail win or lose rewards."""
        # find all players who ever bet, then iterate their bets
        docs = MongoChampionship.db(self.server_id).find({'has_bet': True})
        for doc in docs:
            bet_info = doc['bet'].get(str(level), {})
            if not bet_info:
                continue
            config = ConfigChampionBet.get(bet_info['bet_id'])
            if bet_info['club_id'] in win_ids:
                m_title = config.win_mail_title
                m_content = config.win_mail_content
                m_reward = config.win_reward
            else:
                m_title = config.lose_mail_title
                m_content = config.lose_mail_content
                m_reward = config.lose_reward
            rc = ResourceClassification.classify(m_reward)
            attachment = rc.to_json()
            m = MailManager(self.server_id, doc['_id'])
            m.add(m_title, m_content, attachment=attachment)
    def start_match(self):
        """Play the current knockout level; winners advance to the next.

        :return: the level just played, 0 when the bracket is not yet
            initialized, or None when the champion is already decided.
        """
        if not self.doc['levels']:
            return 0
        lv = self.doc['current_level']
        if lv == 1:
            return None
        next_level = LEVEL_NEXT_TABLE[lv]
        member_ids = self.doc['levels'][str(lv)]['member_ids']
        pairs = make_pairs_from_flat_list(member_ids)
        win_ids = []
        lose_ids = []
        way_wins = {}
        record_ids = {}
        for id_one, id_two in pairs:
            info_one = self.doc['info'][id_one]
            info_two = self.doc['info'][id_two]
            m = Match(self.server_id, id_one, info_one, id_two, info_two)
            one_way_wins, one_record_ids = m.start()
            two_way_wins = [1 - _w for _w in one_way_wins]
            one_way_wins_count = len([_w for _w in one_way_wins if _w == 1])
            # best of three ways decides who advances
            if one_way_wins_count >= 2:
                win_ids.append(id_one)
                lose_ids.append(id_two)
                way_wins[id_one] = one_way_wins
                record_ids[id_one] = one_record_ids
            else:
                win_ids.append(id_two)
                lose_ids.append(id_one)
                way_wins[id_two] = two_way_wins
                record_ids[id_two] = one_record_ids
        self.save(next_level, win_ids, way_wins, record_ids)
        # settle the bets placed on this level
        self.send_bet_reward_mail(lv, win_ids)
        if next_level == 1:
            self.after_final_match()
        return lv
    def after_final_match(self):
        """Wrap up after the final: decide 3rd/4th place, record the top
        four, reset the season, and publish the winners."""
        # bracket is done, but 3rd/4th place must still be played out
        level_4_member_ids = self.doc['levels']['4']['member_ids'][:]
        level_2_member_ids = self.doc['levels']['2']['member_ids'][:]
        # semifinal losers = semifinalists minus finalists
        for i in level_2_member_ids:
            level_4_member_ids.remove(i)
        id_one = level_4_member_ids[0]
        id_two = level_4_member_ids[1]
        info_one = self.doc['info'][id_one]
        info_two = self.doc['info'][id_two]
        m = Match(self.server_id, id_one, info_one, id_two, info_two)
        one_way_wins, one_record_ids = m.start()
        # two_way_wins = [1 - _w for _w in one_way_wins]
        one_way_wins_count = len([_w for _w in one_way_wins if _w == 1])
        if one_way_wins_count >= 2:
            third = id_one
            fourth = id_two
        else:
            third = id_two
            fourth = id_one
        first = self.doc['levels']['1']['member_ids'][0]
        level_2_member_ids.remove(first)
        second = level_2_member_ids[0]
        first_info = self.doc['info'][first]
        second_info = self.doc['info'][second]
        third_info = self.doc['info'][third]
        fourth_info = self.doc['info'][fourth]
        # keep only the latest season's top four in the history collection
        MongoChampionHistory.db(self.server_id).drop()
        history_doc = MongoChampionHistory.document()
        history_doc['member_ids'] = [first, second, third, fourth]
        history_doc['info'] = {
            first: first_info,
            second: second_info,
            third: third_info,
            fourth: fourth_info,
        }
        MongoChampionHistory.db(self.server_id).insert_one(history_doc)
        # clear the group stage
        MongoChampionshipGroup.db(self.server_id).drop()
        group_notify = make_empty_group_notify_msg()
        group_data = MessageFactory.pack(group_notify)
        # clear every player's application flag for the next season
        MongoChampionship.db(self.server_id).update_many(
            {},
            {'$set': {
                'applied': False
            }}
        )
        char_ids = OperationLog.get_recent_action_char_ids(self.server_id)
        basic_notify = make_common_basic_notify_msg(self.server_id)
        for _cid in char_ids:
            MessagePipe(_cid).put(data=group_data)
            Championship(self.server_id, _cid).send_basic_notify(basic_notify=basic_notify)
        # publish the winners' formations to the leaderboard
        winning_notify = LeaderboardChampionshipNotify()
        winning_notify.session = ""
        for __id, __info in [(first, first_info), (second, second_info), (third, third_info)]:
            __match = Match(self.server_id, None, None, None, None)
            __clubs, __skill_sequence = __match.make_3_way_clubs(__id, __info)
            winning_notify_club = winning_notify.clubs.add()
            winning_notify_club.club.MergeFrom(__clubs[0].make_protomsg())
            for __way_id in [1, 2, 3]:
                winning_notify_club_formation = winning_notify_club.formation.add()
                winning_notify_club_formation.MergeFrom(make_plunder_formation_msg(__clubs[__way_id - 1], __way_id))
        WinningChampionship(self.server_id, None).set_to_common(winning_notify)
    def make_protomsg(self, level=None):
        """Build the bracket notify.

        :param level: when given, an incremental update for that level
            only; otherwise a full init message covering every level.
        """
        if level:
            levels = [level]
            act = ACT_UPDATE
        else:
            levels = [CHAMPION_LEVEL_16, CHAMPION_LEVEL_8, CHAMPION_LEVEL_4, CHAMPION_LEVEL_2, CHAMPION_LEVEL_1]
            act = ACT_INIT
        notify = ChampionLevelNotify()
        notify.act = act
        if act == ACT_INIT:
            # the top-16 roster doubles as the id -> name/flag directory
            level16 = self.doc['levels'].get('16', {})
            if level16:
                for i in level16['member_ids']:
                    notify_club = notify.clubs.add()
                    notify_club.id = i
                    notify_club.name = self.doc['info'][i]['name']
                    notify_club.flag = self.doc['info'][i]['flag']
        for lv in levels:
            notify_level = notify.levels.add()
            notify_level.level = lv
            this_level = self.doc['levels'].get(str(lv), {})
            if this_level:
                for _mid in this_level['member_ids']:
                    notify_level_club = notify_level.clubs.add()
                    notify_level_club.id = _mid
                    notify_level_club.way_wins.extend(this_level['way_wins'][str(_mid)])
                    notify_level_club.match_record_ids.extend(this_level['record_ids'][str(_mid)])
            if lv == 16:
                # top-16 results come from the group stage; no scheduled time
                notify_level.match_at = 0
            else:
                notify_level.match_at = find_level_match_at(lv).timestamp
        return notify
| [
"[email protected]"
] | |
911744a0becf71a9d8142dc9e796c3949f6243a8 | 26c0f80688f75a188097a232c229a73c8e7cc6ed | /user/migrations/0016_auto_20210511_1700.py | c17235302b993169c5ae1b568f59d2271a6b2144 | [] | no_license | creep1g/DjangoWebstore | 8207d7ea53c478fb7e5745e1c6ae6699102b5df5 | bd27340b86bf2289b8c14216462d932ccdf4986d | refs/heads/main | 2023-05-06T09:50:04.846489 | 2021-05-28T14:40:40 | 2021-05-28T14:40:40 | 371,730,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # Generated by Django 3.2 on 2021-05-11 17:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user', '0015_auto_20210511_1655'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='searches',
field=models.ManyToManyField(null=True, to='user.SearchHistory'),
),
]
| [
"[email protected]"
] | |
31c63484ece90ef1a58d4d8a1c917875e71e42ba | 0729bc2e2236fadb8fb2eac8b30534d939a45b2e | /DistAnnot/Annot/tests.py | e0c741e72f672231d4fd71b9ee91a723a70a444e | [] | no_license | JudoWill/pyMutF | 8ecdc24fbb2efe2a0a721aab164a2b060de11832 | aaf41ab41eb897c10a721c62913bb49c79f2cefc | refs/heads/master | 2021-01-16T20:34:06.705933 | 2010-10-11T16:55:08 | 2010-10-11T16:55:08 | 710,208 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | """
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
from django.core.urlresolvers import reverse
from DistAnnot.Interaction.models import *
from forms import AnnotForm, InteractionEffectForm
from django.forms.formsets import formset_factory
class SimpleTest(TestCase):
    """Test case backed by the simple Interaction fixture dataset."""
    # loads a minimal Interaction dataset so views/forms have data to work on
    fixtures = ['Interaction.simple_data.yaml']
| [
"[email protected]"
] | |
ba0f8b5d3e6818f96a7f42132ea32967e054c957 | 2f330fc050de11676ab46b963b7878882e9b6614 | /memsource_cli/models/create_analyse_list_async_dto.py | 0679fd3b864b9449bd836de3615a6545e4f4fed0 | [
"Apache-2.0"
] | permissive | zerodayz/memsource-cli-client | 609f48c18a2b6daaa639d4cb8a61da43763b5143 | c2574f1467539a49e6637c874e88d75c7ef789b3 | refs/heads/master | 2020-08-01T12:43:06.497982 | 2019-09-30T11:14:13 | 2019-09-30T11:14:13 | 210,999,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,872 | py | # coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:[email protected]>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.id_reference import IdReference # noqa: F401,E501
from memsource_cli.models.uid_reference import UidReference # noqa: F401,E501
class CreateAnalyseListAsyncDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'jobs': 'list[UidReference]',
'type': 'str',
'include_fuzzy_repetitions': 'bool',
'include_confirmed_segments': 'bool',
'include_numbers': 'bool',
'include_locked_segments': 'bool',
'count_source_units': 'bool',
'include_trans_memory': 'bool',
'include_non_translatables': 'bool',
'include_machine_translation_matches': 'bool',
'trans_memory_post_editing': 'bool',
'non_translatable_post_editing': 'bool',
'machine_translate_post_editing': 'bool',
'name': 'str',
'net_rate_scheme': 'IdReference',
'compare_workflow_level': 'int',
'use_project_analysis_settings': 'bool',
'callback_url': 'str'
}
attribute_map = {
'jobs': 'jobs',
'type': 'type',
'include_fuzzy_repetitions': 'includeFuzzyRepetitions',
'include_confirmed_segments': 'includeConfirmedSegments',
'include_numbers': 'includeNumbers',
'include_locked_segments': 'includeLockedSegments',
'count_source_units': 'countSourceUnits',
'include_trans_memory': 'includeTransMemory',
'include_non_translatables': 'includeNonTranslatables',
'include_machine_translation_matches': 'includeMachineTranslationMatches',
'trans_memory_post_editing': 'transMemoryPostEditing',
'non_translatable_post_editing': 'nonTranslatablePostEditing',
'machine_translate_post_editing': 'machineTranslatePostEditing',
'name': 'name',
'net_rate_scheme': 'netRateScheme',
'compare_workflow_level': 'compareWorkflowLevel',
'use_project_analysis_settings': 'useProjectAnalysisSettings',
'callback_url': 'callbackUrl'
}
def __init__(self, jobs=None, type=None, include_fuzzy_repetitions=None, include_confirmed_segments=None, include_numbers=None, include_locked_segments=None, count_source_units=None, include_trans_memory=None, include_non_translatables=None, include_machine_translation_matches=None, trans_memory_post_editing=None, non_translatable_post_editing=None, machine_translate_post_editing=None, name=None, net_rate_scheme=None, compare_workflow_level=None, use_project_analysis_settings=None, callback_url=None): # noqa: E501
"""CreateAnalyseListAsyncDto - a model defined in Swagger""" # noqa: E501
self._jobs = None
self._type = None
self._include_fuzzy_repetitions = None
self._include_confirmed_segments = None
self._include_numbers = None
self._include_locked_segments = None
self._count_source_units = None
self._include_trans_memory = None
self._include_non_translatables = None
self._include_machine_translation_matches = None
self._trans_memory_post_editing = None
self._non_translatable_post_editing = None
self._machine_translate_post_editing = None
self._name = None
self._net_rate_scheme = None
self._compare_workflow_level = None
self._use_project_analysis_settings = None
self._callback_url = None
self.discriminator = None
self.jobs = jobs
if type is not None:
self.type = type
if include_fuzzy_repetitions is not None:
self.include_fuzzy_repetitions = include_fuzzy_repetitions
if include_confirmed_segments is not None:
self.include_confirmed_segments = include_confirmed_segments
if include_numbers is not None:
self.include_numbers = include_numbers
if include_locked_segments is not None:
self.include_locked_segments = include_locked_segments
if count_source_units is not None:
self.count_source_units = count_source_units
if include_trans_memory is not None:
self.include_trans_memory = include_trans_memory
if include_non_translatables is not None:
self.include_non_translatables = include_non_translatables
if include_machine_translation_matches is not None:
self.include_machine_translation_matches = include_machine_translation_matches
if trans_memory_post_editing is not None:
self.trans_memory_post_editing = trans_memory_post_editing
if non_translatable_post_editing is not None:
self.non_translatable_post_editing = non_translatable_post_editing
if machine_translate_post_editing is not None:
self.machine_translate_post_editing = machine_translate_post_editing
if name is not None:
self.name = name
if net_rate_scheme is not None:
self.net_rate_scheme = net_rate_scheme
if compare_workflow_level is not None:
self.compare_workflow_level = compare_workflow_level
if use_project_analysis_settings is not None:
self.use_project_analysis_settings = use_project_analysis_settings
if callback_url is not None:
self.callback_url = callback_url
@property
def jobs(self):
    """Jobs to be analysed.

    :return: the jobs of this CreateAnalyseListAsyncDto
    :rtype: list[UidReference]
    """
    return self._jobs

@jobs.setter
def jobs(self, value):
    """Update the jobs of this CreateAnalyseListAsyncDto.

    :param value: job references to analyse; must not be None
    :type value: list[UidReference]
    :raises ValueError: if value is None (jobs is a required field)
    """
    if value is None:
        raise ValueError("Invalid value for `jobs`, must not be `None`")  # noqa: E501
    self._jobs = value
@property
def type(self):
    """Analysis type.

    default: PreAnalyse

    :return: the type of this CreateAnalyseListAsyncDto
    :rtype: str
    """
    return self._type

@type.setter
def type(self, value):
    """Update the type of this CreateAnalyseListAsyncDto.

    :param value: one of "PreAnalyse", "PostAnalyse", "Compare"
    :type value: str
    :raises ValueError: if value is not one of the allowed types
    """
    allowed_values = ["PreAnalyse", "PostAnalyse", "Compare"]  # noqa: E501
    if value not in allowed_values:
        raise ValueError(
            "Invalid value for `type` ({0}), must be one of {1}"  # noqa: E501
            .format(value, allowed_values)
        )
    self._type = value
@property
def include_fuzzy_repetitions(self):
    """Whether fuzzy repetitions are counted in the analysis.

    Default: true

    :return: the include_fuzzy_repetitions flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._include_fuzzy_repetitions

@include_fuzzy_repetitions.setter
def include_fuzzy_repetitions(self, value):
    """Set the include_fuzzy_repetitions flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._include_fuzzy_repetitions = value
@property
def include_confirmed_segments(self):
    """Whether confirmed segments are counted in the analysis.

    Default: true

    :return: the include_confirmed_segments flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._include_confirmed_segments

@include_confirmed_segments.setter
def include_confirmed_segments(self, value):
    """Set the include_confirmed_segments flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._include_confirmed_segments = value
@property
def include_numbers(self):
    """Whether numbers are counted in the analysis.

    Default: true

    :return: the include_numbers flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._include_numbers

@include_numbers.setter
def include_numbers(self, value):
    """Set the include_numbers flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._include_numbers = value
@property
def include_locked_segments(self):
    """Whether locked segments are counted in the analysis.

    Default: true

    :return: the include_locked_segments flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._include_locked_segments

@include_locked_segments.setter
def include_locked_segments(self, value):
    """Set the include_locked_segments flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._include_locked_segments = value
@property
def count_source_units(self):
    """Whether counts are based on source-side units.

    Default: true

    :return: the count_source_units flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._count_source_units

@count_source_units.setter
def count_source_units(self, value):
    """Set the count_source_units flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._count_source_units = value
@property
def include_trans_memory(self):
    """Whether translation-memory matches are included in the analysis.

    Default: true

    :return: the include_trans_memory flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._include_trans_memory

@include_trans_memory.setter
def include_trans_memory(self, value):
    """Set the include_trans_memory flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._include_trans_memory = value
@property
def include_non_translatables(self):
    """Whether non-translatable matches are included.

    Default: false. Works only for type=PreAnalyse.

    :return: the include_non_translatables flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._include_non_translatables

@include_non_translatables.setter
def include_non_translatables(self, value):
    """Set the include_non_translatables flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._include_non_translatables = value
@property
def include_machine_translation_matches(self):
    """Whether machine-translation matches are included.

    Default: false. Works only for type=PreAnalyse.

    :return: the include_machine_translation_matches flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._include_machine_translation_matches

@include_machine_translation_matches.setter
def include_machine_translation_matches(self, value):
    """Set the include_machine_translation_matches flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._include_machine_translation_matches = value
@property
def trans_memory_post_editing(self):
    """Whether TM post-editing is analysed.

    Default: false. Works only for type=PostAnalyse.

    :return: the trans_memory_post_editing flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._trans_memory_post_editing

@trans_memory_post_editing.setter
def trans_memory_post_editing(self, value):
    """Set the trans_memory_post_editing flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._trans_memory_post_editing = value
@property
def non_translatable_post_editing(self):
    """Whether non-translatable post-editing is analysed.

    Default: false. Works only for type=PostAnalyse.

    :return: the non_translatable_post_editing flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._non_translatable_post_editing

@non_translatable_post_editing.setter
def non_translatable_post_editing(self, value):
    """Set the non_translatable_post_editing flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._non_translatable_post_editing = value
@property
def machine_translate_post_editing(self):
    """Whether machine-translation post-editing is analysed.

    Default: false. Works only for type=PostAnalyse.

    :return: the machine_translate_post_editing flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._machine_translate_post_editing

@machine_translate_post_editing.setter
def machine_translate_post_editing(self, value):
    """Set the machine_translate_post_editing flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._machine_translate_post_editing = value
@property
def name(self):
    """Gets the name of this CreateAnalyseListAsyncDto.

    :return: The name of this CreateAnalyseListAsyncDto.
    :rtype: str
    """
    return self._name

@name.setter
def name(self, name):
    """Sets the name of this CreateAnalyseListAsyncDto.

    :param name: The name of this CreateAnalyseListAsyncDto (optional).
    :type name: str
    :raises ValueError: if name is longer than 255 characters
    """
    if name is not None and len(name) > 255:
        raise ValueError("Invalid value for `name`, length must be less than or equal to `255`")  # noqa: E501
    # The generated lower-bound check (`len(name) < 0`) was unreachable —
    # len() can never be negative — so it has been removed.
    self._name = name
@property
def net_rate_scheme(self):
    """Net rate scheme applied to the analysis.

    :return: the net_rate_scheme of this CreateAnalyseListAsyncDto
    :rtype: IdReference
    """
    return self._net_rate_scheme

@net_rate_scheme.setter
def net_rate_scheme(self, value):
    """Set the net_rate_scheme of this CreateAnalyseListAsyncDto.

    :param value: the new scheme reference
    :type value: IdReference
    """
    self._net_rate_scheme = value
@property
def compare_workflow_level(self):
    """Workflow level to compare against.

    Required for type=Compare.

    :return: the compare_workflow_level of this CreateAnalyseListAsyncDto
    :rtype: int
    """
    return self._compare_workflow_level

@compare_workflow_level.setter
def compare_workflow_level(self, value):
    """Set the compare_workflow_level of this CreateAnalyseListAsyncDto.

    :param value: workflow level in the inclusive range 1..15, or None
    :type value: int
    :raises ValueError: if value is outside the range 1..15
    """
    if value is not None and value > 15:  # noqa: E501
        raise ValueError("Invalid value for `compare_workflow_level`, must be a value less than or equal to `15`")  # noqa: E501
    if value is not None and value < 1:  # noqa: E501
        raise ValueError("Invalid value for `compare_workflow_level`, must be a value greater than or equal to `1`")  # noqa: E501
    self._compare_workflow_level = value
@property
def use_project_analysis_settings(self):
    """Whether the project's default analysis settings are used.

    Default: false. Use default project settings. Will be overwritten with
    setting sent in the API call.

    :return: the use_project_analysis_settings flag of this CreateAnalyseListAsyncDto
    :rtype: bool
    """
    return self._use_project_analysis_settings

@use_project_analysis_settings.setter
def use_project_analysis_settings(self, value):
    """Set the use_project_analysis_settings flag of this CreateAnalyseListAsyncDto.

    :param value: the new flag value
    :type value: bool
    """
    self._use_project_analysis_settings = value
@property
def callback_url(self):
    """URL notified when the asynchronous analysis finishes.

    :return: the callback_url of this CreateAnalyseListAsyncDto
    :rtype: str
    """
    return self._callback_url

@callback_url.setter
def callback_url(self, value):
    """Set the callback_url of this CreateAnalyseListAsyncDto.

    :param value: the new callback URL
    :type value: str
    """
    self._callback_url = value
def to_dict(self):
    """Returns the model properties as a dict.

    Values exposing a ``to_dict`` method (nested swagger models) are
    serialised recursively, both as list elements and as dict values.
    Uses plain ``dict.items()`` instead of the py2-compat ``six.iteritems``
    shim and comprehensions instead of ``map``/``lambda`` — behaviour is
    unchanged on Python 3.
    """
    result = {}
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            result[attr] = value
    # Generated models may subclass dict; merge any plain dict entries too.
    if issubclass(CreateAnalyseListAsyncDto, dict):
        for key, value in self.items():
            result[key] = value
    return result
def to_str(self):
    """Returns the string representation of the model"""
    model_dict = self.to_dict()
    return pprint.pformat(model_dict)
def __repr__(self):
    """For `print` and `pprint`: delegates to to_str()."""
    return self.to_str()
def __eq__(self, other):
    """Two DTOs compare equal when their attribute dictionaries match."""
    if isinstance(other, CreateAnalyseListAsyncDto):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other):
    """Returns true if both objects are not equal (negation of __eq__)."""
    return not self == other
| [
"[email protected]"
] | |
37321511f55b483428e71701554e9e17bf1df771 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_7/hrnali002/question1.py | bddd72a0b19d90ef62d339aa08a5e015b73c2dc2 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | """A program to print a list with not duplicated words
Alison Hoernle
HRNALI002
27 April 2014"""
# get input and convert to a list
list = []
strings = input("Enter strings (end with DONE):\n")
while strings != "DONE":
list.append(strings)
strings = input()
print()
print("Unique list:")
# create an empty string and then go through list. Add each word to empty string and if in string already then don't print that word again
counted_words = ''
for string in list:
if string in counted_words:
continue
else:
print(string)
counted_words += string | [
"[email protected]"
] | |
f3a2ad5c32de8876caeae5f5f9095fdd0ef824c5 | 400c569b19d003d0b9d1b31bc1b698ae510cbc46 | /Celestial classification/models.py | d4b60dffc8e997aebb887787f6bf21975ed96fb3 | [] | no_license | as950118/dacon | 05a203ab36375a69549ac39ba3b02a90431c860a | a1489a55a7a53a755d6cf50081522bd7c1c48b4f | refs/heads/master | 2021-02-13T20:06:38.169482 | 2020-03-03T19:51:51 | 2020-03-03T19:51:51 | 244,727,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py | import pandas as pd
from sklearn.model_selection import train_test_split
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from data_processing import DataProcessing

# Fixed seed so the fitted models are reproducible across runs.
random_seed = 0

# Competition input files (Dacon celestial-object classification).
train_data_path = "./data/train.csv"
test_data_path = "./data/test.csv"
sample_submission_data_path = "./data/sample_submission.csv"

# Load the CSVs, then derive train/validation matrices and test features.
# Split details live in the project-local DataProcessing helper.
data_processing = DataProcessing(train_data_path, test_data_path, sample_submission_data_path)
train_data, test_data, sample_submission_data = data_processing.load_file()
x_train, x_valid, y_train, y_valid = data_processing.set_data(train_data, test_data)

'''
# catboost
cat_clf = CatBoostClassifier(iterations = 20000, random_state = random_seed, task_type="GPU")
cat_clf.fit(x_train, y_train, eval_set = [(x_train, y_train), (x_valid, y_valid)])
cat_pred = cat_clf.predict_proba(test_data)
submission = pd.DataFrame(data=cat_pred, columns=sample_submission_data.columns, index=sample_submission_data.index)
submission.to_csv('./results/cat_boost2.csv', index=True)
'''

# lgbm: fit LightGBM, monitoring both train and validation sets, then write
# class probabilities for the test set as a submission CSV.
#lgbm_clf = LGBMClassifier(n_estimators = 1000, n_jobs=-1, random_state = random_seed, device = 'gpu')
lgbm_clf = LGBMClassifier(n_estimators = 1000, n_jobs=-1, random_state = random_seed)
lgbm_clf.fit(x_train, y_train, eval_set = [(x_train, y_train), (x_valid, y_valid)])
lgbm_pred = lgbm_clf.predict_proba(test_data)
submission = pd.DataFrame(data=lgbm_pred, columns=sample_submission_data.columns, index=sample_submission_data.index)
submission.to_csv('./results/light_gbm2.csv', index=True)

# xgboost: same pipeline with XGBoost (GPU variant left commented out).
#xgb_clf = XGBClassifier(n_estimators = 1000, n_jobs=-1, random_state=random_seed, tree_method='gpu_exact')
xgb_clf = XGBClassifier(n_estimators = 1000, n_jobs=-1, random_state=random_seed)
xgb_clf.fit(x_train, y_train, eval_set = [(x_train, y_train), (x_valid, y_valid)])
xgb_pred = xgb_clf.predict_proba(test_data)
submission = pd.DataFrame(data=xgb_pred, columns=sample_submission_data.columns, index=sample_submission_data.index)
submission.to_csv('./results/xg_boost2.csv', index=True)
| [
"[email protected]"
] | |
3994ec01676f94e3b0ed9d34c4e51522f1548082 | 6b3ec47ee410a7d2ed2102cc5bcfa13c7a6342e2 | /bin/easy_install-3.6 | 5d6f8c4e10d68c760d508456eeaaa31b7e59754b | [] | no_license | makkar-nishant123/Refermeframework | fddb912304bdb4ffe3e169fda2d60b4171d8b6c1 | a152f42f6ab63c037bf3f117aa5be1ceb3a1d178 | refs/heads/master | 2020-05-15T23:29:18.684101 | 2019-04-28T17:31:22 | 2019-04-28T17:31:22 | 182,555,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | 6 | #!/Users/nishantmakkar/PycharmProjects/RefermeFramework/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
# Auto-generated console-script shim: resolves the `easy_install-3.6`
# entry point from the pinned setuptools distribution and invokes it.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip launcher suffixes (-script.py/-script.pyw/.exe) from argv[0]
    # so the tool reports its canonical executable name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
    )
| [
"[email protected]"
] | |
f529c2813ffd27be60a2c246cf2853fcf650896f | 78912badbaa634d84a93ac03872f18b3f14092a0 | /photosorter-readbuckets.py | 21e4410b93a348af18e57021e9ae46609456fa81 | [] | no_license | mperry8889/photosorter | fc556054ce2af1a50c91c585c80eb6d65ff23f4f | d20c7a51a6e0e7aef4e4eb9260a344d54c52e539 | refs/heads/master | 2021-05-29T06:55:32.482767 | 2011-05-08T17:04:59 | 2011-05-08T17:04:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | #!/usr/bin/env python
# Python 2 script: dump every photo known to the sorter, one line per photo.
from photosorter import PhotoSorter
from photosorter import Bucket
from photosorter import Photo

if __name__ == "__main__":
    p = PhotoSorter()
    # Each bucket partitions its photos by sorting state; walk all of them.
    for bucket in p.buckets:
        for state in ["during", "after", "before", "unknown", "unsorted"]:
            for photo in getattr(bucket, state):
                # Fields: state, bucket year, file name, then orientation data.
                print "%s %s %s %s %s" % (state, bucket.year, photo.filename, photo.rotation, photo.flip_horizontal)
| [
"none@none"
] | none@none |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.