blob_id (string, 40) | directory_id (string, 40) | path (string, 3-616) | content_id (string, 40) | detected_licenses (sequence, 0-112) | license_type (2 classes) | repo_name (string, 5-115) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (188 classes) | content (string, 3 to 10.2M) | authors (sequence, 1) | author_id (string, 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
47f00f80cad38759f334b8bab11475d0fb59109b | 1ed536ef1527e6655217e731f622d643ece49c2b | /scripts/gpipe/predictions2assembly.py | e1a513826d4d3fdb4fe7b6583ffb7338c3e950d1 | [] | no_license | siping/cgat | de0f7af124eb38c72d7dece78fff83ff92ddbf96 | aa4cc85ffdc53998ea1a5ac5516df2d16c254d2e | refs/heads/master | 2021-01-22T13:03:18.060139 | 2013-10-07T15:53:55 | 2013-10-07T15:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,182 | py | ################################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#################################################################################
'''
gpipe/predictions2assembly.py - compile collinear predictions across contigs
=============================================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
Compile collinear predictions that are spread across several contigs into
new predictions. Old predictions are translated onto the newly created contigs.
Usage
-----
Example::
python gpipe/predictions2assembly.py --help
Type::
python gpipe/predictions2assembly.py --help
for command line help.
Documentation
-------------
Code
----
'''
import os
import sys
import string
import re
import getopt
import time
import tempfile
USAGE="""python %s [OPTIONS] < exonerate_output > filtered
Version: $Id: gpipe/predictions2assembly.py 698 2006-07-19 15:53:22Z andreas $
Compile collinear predictions that are spread across several contigs into new predictions.
Old predictions are translated onto the newly created contigs.
If --join-pattern is given, joined contigs are created.
If --join-pattern contains a %%s, a new file is created for each assembled contig; otherwise
all are written to a single file.
Options:
-h, --help print this message.
-v, --verbose= loglevel.
-g, --genome-file= pattern for filenames with the genomic DNA (FASTA).
-j, --join-pattern= write joined contigs with pattern
-f, --format= input format [predictions]
-i, --max-intron maximum intron length
-d, --max-difference maximum difference between peptide and genomic gap
-c, --contigs= filename with contig sizes
-o, --max-overlap= maximum overlap
-s, --filename-sizes= filename with sizes
""" % sys.argv[0]
import CGAT.Experiment as E
import CGAT.Genomics as Genomics
import CGAT.Intervalls as Intervalls
import CGAT.PredictionParser as PredictionParser
import CGAT.PredictionFile as PredictionFile
param_loglevel = 1
## maximum intron size
param_max_intron = 50000
param_format = "predictions"
param_long_options=["verbose=", "help",
"format=", "max-intron=",
"join-pattern=", "genome-file=",
"max-overlap=",
"max-difference=", "contigs=",
"filename-sizes=", "version"]
param_short_options="v:hf:i:d:c:o:jg:s:"
param_max_difference = 10
# relative permissive overlap
param_max_relative_overlap=50
# absolute permissive overlap
param_max_overlap = 0
param_conserve_frame = 0
param_filename_contigs = "contig_sizes"
param_filename_join_pattern = None
## pattern for genomes, %s is substituted for the sbjct_token
param_genome_file = "genome_%s.fasta"
param_separator_contigs = "-"
param_filename_sizes = None
global_last_filename_genome = None
global_forward_sequences = None
global_reverse_sequences = None
##------------------------------------------------------------
def ProcessSegments( segments ):
"""process a set of segments for a given query.
1. Resolve exon permutations
Exon permutations are stretches, where the peptide fragment is
not aligned in the right order to genomic DNA. This is not
crucial here, as we are interested only in the genomic region.
However, we do not want to extend genomic stretches due
to spurious matches. Thus, delete exon permutations at
the beginning and the end and only take the core.
"""
if param_loglevel >= 3: print "## processing %i segments" % len(segments)
## combine segments
new_entries = []
for x in range(len(segments) - 1):
for y in range(x, len(segments)):
## check for no overlap on genome
if (min(segments[x].mSbjctGenomeTo, segments[y].mSbjctGenomeTo) - \
max(segments[x].mSbjctGenomeFrom, segments[y].mSbjctGenomeFrom)) > 0:
continue
## check for no overlap on query
if (min(segments[x].mQueryTo, segments[y].mQueryTo) - \
max(segments[x].mQueryFrom, segments[y].mQueryFrom)) > 0:
continue
## check for collinearity
d_aa = segments[y].mQueryFrom - segments[x].mQueryTo + 1
d_na = segments[y].mSbjctGenomeFrom - segments[x].mSbjctGenomeTo
if abs(d_aa * 3 - d_na) < param_max_difference :
dframe = d_na % 3
if param_loglevel >= 2:
print "# collinear sequences with d_aa=%i, d_na=%i, delta=%i, dframe=%i" % \
(d_aa, d_na, d_aa*3 - d_na, dframe )
if param_loglevel >= 3:
print "# part1:", str(segments[x])
print "# part2:", str(segments[y])
if param_conserve_frame and dframe: continue
new_entry = segments[x].GetCopy()
new_entry.Add( segments[y] )
new_entries.append( new_entry )
return new_entries
##------------------------------------------------------------
def ProcessChunk( entries ):
if param_loglevel >= 2: print "# received %i entries." % (len(entries))
## array with predictions after segments have been merged
new_entries = []
if len(entries) > 0:
## sort entries by query and genomic region
entries.sort( lambda x, y: cmp( ( x.mQueryToken, x.mSbjctToken, x.mSbjctStrand, x.mSbjctGenomeFrom),
( y.mQueryToken, y.mSbjctToken, y.mSbjctStrand, y.mSbjctGenomeFrom) ) )
## array with distinct segmental regions
segments = []
last_entry = entries[0]
segments.append( last_entry )
for entry in entries[1:]:
is_new_chunk = 0
## check, if we are within the same "gene"
## same gene is:
## * same query, same chromosome, same strand
## * gap not longer than param_max_intron
if last_entry.mSbjctToken != entry.mSbjctToken or \
last_entry.mSbjctStrand != entry.mSbjctStrand or \
last_entry.mQueryToken != entry.mQueryToken or \
(entry.mSbjctGenomeFrom - last_entry.mSbjctGenomeTo) > param_max_intron:
new_entries += ProcessSegments( segments )
segments = []
segments.append( entry )
last_entry = entry
new_entries += ProcessSegments( segments )
if param_loglevel >= 2:
print "# number of predictions: %i" % len(new_entries)
return new_entries
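##------------------------------------------------------------
## BoundaryPredictions collects, for one contig, the predictions lying
## closest to each contig end (minimum/maximum genomic coordinate on either
## strand, within param_max_difference); these are the candidates used later
## when testing whether two contigs can be joined.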
class BoundaryPredictions:
def __init__(self):
## initialize per instance: class-level lists would be shared (and
## mutated) across all instances, which is a latent bug
self.mPositiveMax = 0
self.mPositiveMin = 1000000000
self.mNegativeMax = 0
self.mNegativeMin = 1000000000
self.mPositiveMinPredictions = []
self.mPositiveMaxPredictions = []
self.mNegativeMinPredictions = []
self.mNegativeMaxPredictions = []
def update( self, prediction ):
if prediction.mSbjctStrand == "+":
if prediction.mSbjctGenomeFrom < self.mPositiveMin:
self.mPositiveMinPredictions = filter(
lambda x: x.mSbjctGenomeFrom <= prediction.mSbjctGenomeFrom + param_max_difference,
self.mPositiveMinPredictions)
self.mPositiveMinPredictions.append(prediction)
self.mPositiveMin = prediction.mSbjctGenomeFrom
elif prediction.mSbjctGenomeFrom <= self.mPositiveMin + param_max_difference:
self.mPositiveMinPredictions.append(prediction)
if prediction.mSbjctGenomeTo > self.mPositiveMax:
self.mPositiveMaxPredictions = filter(
lambda x: x.mSbjctGenomeTo >= prediction.mSbjctGenomeTo - param_max_difference,
self.mPositiveMaxPredictions)
self.mPositiveMaxPredictions.append(prediction)
self.mPositiveMax = prediction.mSbjctGenomeTo
elif prediction.mSbjctGenomeTo >= self.mPositiveMax - param_max_difference:
self.mPositiveMaxPredictions.append(prediction)
else:
if prediction.mSbjctGenomeFrom < self.mNegativeMin:
self.mNegativeMinPredictions = filter(
lambda x: x.mSbjctGenomeFrom <= prediction.mSbjctGenomeFrom + param_max_difference,
self.mNegativeMinPredictions)
self.mNegativeMinPredictions.append(prediction)
self.mNegativeMin = prediction.mSbjctGenomeFrom
elif prediction.mSbjctGenomeFrom <= self.mNegativeMin + param_max_difference:
self.mNegativeMinPredictions.append(prediction)
if prediction.mSbjctGenomeTo > self.mNegativeMax:
self.mNegativeMaxPredictions = filter(
lambda x: x.mSbjctGenomeTo >= prediction.mSbjctGenomeTo - param_max_difference,
self.mNegativeMaxPredictions)
self.mNegativeMaxPredictions.append(prediction)
self.mNegativeMax = prediction.mSbjctGenomeTo
elif prediction.mSbjctGenomeTo >= self.mNegativeMax - param_max_difference:
self.mNegativeMaxPredictions.append(prediction)
def __str__( self ):
return "#" + string.join( map( str, (self.mPositiveMin, self.mPositiveMax, self.mNegativeMin, self.mNegativeMax))) + "\n" +\
"# min-positive\n" + string.join( map( str, self.mPositiveMinPredictions), "\n") + "\n" +\
"# max-positive\n" + string.join( map( str, self.mPositiveMaxPredictions), "\n") + "\n" +\
"# min-negative\n" + string.join( map( str, self.mNegativeMinPredictions), "\n") + "\n" +\
"# max-negative\n" + string.join( map( str, self.mNegativeMaxPredictions), "\n") + "\n"
def CheckOverlap( l1, l2 ):
"""check if there are at least two predictions that are collinear."""
results = []
for p1 in l1:
for p2 in l2:
if p1.mQueryToken != p2.mQueryToken: continue
overlap = min(p1.mQueryTo, p2.mQueryTo) - max(p1.mQueryFrom, p2.mQueryFrom)
if 100 * overlap / (p1.mQueryTo - p1.mQueryFrom + 1) >= param_max_relative_overlap or \
100 * overlap / (p2.mQueryTo - p2.mQueryFrom + 1) >= param_max_relative_overlap:
continue
if p1.mQueryTo < p2.mQueryFrom + param_max_overlap or \
p1.mQueryFrom > p2.mQueryTo - param_max_overlap:
results.append( (p1, p2) )
return results
def CheckCollinearity( c1, c2 ):
"""check if there are at least two predictions that are collinear.
Check in all 16 combinations.
"""
results = []
results += CheckOverlap( c1.mPositiveMinPredictions, c2.mPositiveMinPredictions )
results += CheckOverlap( c1.mPositiveMinPredictions, c2.mPositiveMaxPredictions )
results += CheckOverlap( c1.mPositiveMinPredictions, c2.mNegativeMinPredictions )
results += CheckOverlap( c1.mPositiveMinPredictions, c2.mNegativeMaxPredictions )
results += CheckOverlap( c1.mPositiveMaxPredictions, c2.mPositiveMinPredictions )
results += CheckOverlap( c1.mPositiveMaxPredictions, c2.mPositiveMaxPredictions )
results += CheckOverlap( c1.mPositiveMaxPredictions, c2.mNegativeMinPredictions )
results += CheckOverlap( c1.mPositiveMaxPredictions, c2.mNegativeMaxPredictions )
results += CheckOverlap( c1.mNegativeMinPredictions, c2.mPositiveMinPredictions )
results += CheckOverlap( c1.mNegativeMinPredictions, c2.mPositiveMaxPredictions )
results += CheckOverlap( c1.mNegativeMinPredictions, c2.mNegativeMinPredictions )
results += CheckOverlap( c1.mNegativeMinPredictions, c2.mNegativeMaxPredictions )
results += CheckOverlap( c1.mNegativeMaxPredictions, c2.mPositiveMinPredictions )
results += CheckOverlap( c1.mNegativeMaxPredictions, c2.mPositiveMaxPredictions )
results += CheckOverlap( c1.mNegativeMaxPredictions, c2.mNegativeMinPredictions )
results += CheckOverlap( c1.mNegativeMaxPredictions, c2.mNegativeMaxPredictions )
return results
def GetContig( prediction ):
"""get contig sequence for prediction."""
global global_last_filename_genome
global global_forward_sequences
global global_reverse_sequences
if "%s" in param_genome_file:
filename_genome = param_genome_file % prediction.mSbjctToken
else:
filename_genome = param_genome_file
if global_last_filename_genome != filename_genome:
if param_loglevel >= 2:
print "# reading genome %s" % filename_genome
try:
global_forward_sequences, global_reverse_sequences = Genomics.ReadGenomicSequences( open(filename_genome, "r"))
except IOError:
raise "# WARNING: genome %s not found" % filename_genome
global_last_filename_genome = filename_genome
if prediction.mSbjctStrand == "+":
return (prediction.mSbjctToken, global_forward_sequences[ prediction.mSbjctToken ], False)
else:
return (prediction.mSbjctToken, global_reverse_sequences[ prediction.mSbjctToken ], True)
##------------------------------------------------------------
if __name__ == '__main__':
try:
optlist, args = getopt.getopt(sys.argv[1:], param_short_options, param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(2)
for o,a in optlist:
if o in ( "-v", "--verbose" ):
param_loglevel = int(a)
elif o in ( "-h", "--help" ):
print USAGE
sys.exit(0)
elif o in ( "--version", ):
print "version="
sys.exit(0)
elif o in ("-f", "--format"):
param_format = a
elif o in ("-i", "--max-intron"):
param_max_intron = int(a)
elif o in ("-d", "--max-difference"):
param_max_difference = int(a)
elif o in ("-o", "--max-overlap"):
param_max_overlap = int(a)
elif o in ("-c", "--contigs"):
param_filename_contigs = a
elif o in ("-g", "--genome-file"):
param_genome_file = a
elif o in ("-j", "--join-pattern"):
param_filename_join_pattern = a
elif o in ("-s", "--filename-sizes"):
param_filename_sizes = a
if len(args) > 0:
print USAGE, "no arguments required."
sys.exit(2)
print E.GetHeader()
print E.GetParams()
ninput = 0
max_id = 0
contig_sizes = Genomics.ReadContigSizes( open(param_filename_contigs, "r") )
#############################################################################
## reading predictions
contig = {}
tmp_predictions, filename_tmp_predictions = tempfile.mkstemp()
os.close(tmp_predictions)
tmp_predictions = PredictionFile.PredictionFile()
tmp_predictions.open( filename_tmp_predictions, "w" )
if param_format == "predictions":
last_entry = None
entries = []
for line in sys.stdin:
if line[0] == "#": continue
entry = PredictionParser.PredictionParserEntry( expand = 1 )
try:
entry.Read( line )
except ValueError:
print "# warning: parsing error in line %s" % line[:-1]
continue
ninput += 1
max_id = max( entry.mPredictionId, max_id )
if entry.mSbjctToken not in contig:
contig[entry.mSbjctToken] = BoundaryPredictions()
contig[entry.mSbjctToken].update( entry )
tmp_predictions.append( entry )
if param_loglevel >= 4:
for c in contig.keys():
print "######start of %s #####################################################" % c
print "#", str(contig[c])
print "######end of %s #####################################################" % c
tmp_predictions.close()
max_id += 1
first_pseudo_id = max_id
cc = contig.keys()
#############################################################################
## get pairs of collinear predictions on different contigs
results = []
if param_loglevel >= 1:
print "# finished parsing %i contigs" % len(cc)
sys.stdout.flush()
for c1 in range(len(cc)-1):
if param_loglevel >= 1:
print "# processing: %i/%i" % (c1 + 1, len(cc))
sys.stdout.flush()
for c2 in range(c1+1, len(cc)):
r = CheckCollinearity( contig[cc[c1]], contig[cc[c2]])
if r and param_loglevel >= 3:
print "# --------------------------------------------------------"
print "# %s and %s are collinear" % (cc[c1], cc[c2])
for r1, r2 in r:
print "# ----------------------"
print "#", str(r1)
print "#", str(r2)
results += r
#############################################################################
## cluster collinear predictions on different contigs by query_token
queries = {}
for r1, r2 in results:
if r1.mQueryToken not in queries:
queries[r1.mQueryToken] = {}
queries[r1.mQueryToken][r1.mPredictionId] = r1
queries[r1.mQueryToken][r2.mPredictionId] = r2
nnew = 0
ncancelled = 0
## set of contigs joined
map_contig2new = {}
## names of new contigs
new_contigs = {}
## remove old contig file, if it already exists.
if param_filename_join_pattern and "%s" not in param_filename_join_pattern:
if os.path.exists( param_filename_join_pattern ):
os.remove( param_filename_join_pattern )
if param_filename_sizes:
outfile_sizes = open(param_filename_sizes, "w")
else:
outfile_sizes = None
#############################################################################
## join contigs
for q in queries.keys():
s = queries[q].values()
s.sort( lambda x,y: cmp( (x.mQueryFrom, x.mQueryTo), (y.mQueryFrom, y.mQueryTo)) )
if param_loglevel >= 2:
print "# -----------------------------------------------"
print "# predictions to be joined for query=", q
for p in s:
print "#", str(p)
print "# -----------------------------------------------"
new_prediction = s[0].GetCopy()
last_contig_size = contig_sizes[new_prediction.mSbjctToken]
do_cancel = False
contigs = []
if param_filename_join_pattern:
contigs.append( GetContig(new_prediction) )
for p in s[1:]:
overlap = new_prediction.mQueryTo - p.mQueryFrom + 1
if overlap > 0:
if overlap > param_max_overlap or \
100 * (p.mQueryTo - p.mQueryFrom + 1) / overlap > param_max_relative_overlap:
print "# dodgy prediction sequence (overlap = %i), joining of contigs cancelled." % overlap
sys.stdout.flush()
do_cancel = True
break
if param_filename_join_pattern:
contigs.append( GetContig(p) )
new_prediction.Add( p,
combine_contig = True,
allow_overlap = True,
contig_size = last_contig_size )
last_contig_size += contig_sizes[p.mSbjctToken]
if do_cancel:
ncancelled += 1
continue
nnew += 1
new_prediction.mPredictionId = max_id
new_prediction.mSbjctStrand = "+"
max_id += 1
print "# joining\t" + string.join( map( lambda x: x.mSbjctToken + x.mSbjctStrand, s), "\t")
if param_filename_join_pattern and len(contigs) > 0:
new_contig = string.join( map( lambda x: x[0], contigs), param_separator_contigs)
## do not write the same contig twice
if new_contig not in new_contigs:
new_contigs[new_contig] = 1
lcontig = len(string.join(map(lambda x: x[1], contigs), ""))
## check if contig already part of a different joined contig
l = 0
for id, sequence, switch in contigs:
if id in map_contig2new:
print "# WARNING: contig %s already joined" % id
map_contig2new[id] = ( new_contig, switch, l, lcontig - l - len(sequence))
l += len(sequence)
## write new contig
if "%s" in param_filename_join_pattern:
filename_genome = param_filename_join_pattern % new_contig
outfile = open(filename_genome, "w")
else:
filename_genome = param_filename_join_pattern
outfile = open(filename_genome, "a")
if outfile_sizes:
outfile_sizes.write( "%s\t%i\t0\n" % ( new_contig, lcontig) )
outfile.write( ">" + new_contig + "\n" + string.join(map(lambda x: x[1], contigs), "") + "\n")
outfile.close()
print str(new_prediction)
if outfile_sizes:
outfile_sizes.close()
#####################################################################################
## move other predictions into the new contigs by translating their coordinates
tmp_predictions.open( mode = "r" )
noutput = 0
ntranslated = 0
for p in tmp_predictions:
if p.mSbjctToken in map_contig2new:
p.mSbjctToken, switch, offset_pos, offset_neg = map_contig2new[p.mSbjctToken]
if (switch and p.mSbjctStrand == "+") or \
(not switch and p.mSbjctStrand == "-"):
offset = offset_neg
else:
offset = offset_pos
## change strand for inverted contigs
if switch:
if p.mSbjctStrand == "+":
p.mSbjctStrand = "-"
else:
p.mSbjctStrand = "+"
p.mSbjctGenomeFrom += offset
p.mSbjctGenomeTo += offset
ntranslated += 1
noutput += 1
print str(p)
if param_loglevel >= 1:
print "## nread=%i, nnew=%i, noutput=%i, ntranslated=%i, first_id=%i" %\
(ninput, nnew, noutput, ntranslated, first_pseudo_id)
print "# ncontigs=%i, npairs=%i, nqueries=%i, nnew=%i, njoined=%i, ncancelled=%i" %\
(len(contig), len(results), len(queries), len(new_contigs), len(map_contig2new), ncancelled)
os.remove( filename_tmp_predictions )
print E.GetFooter()
| [
"[email protected]"
] | |
90c756f512fb32649c9ec73fa43d1af2fd58ef50 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/advanced/164/test_links.py | 8d9d7847acae4547f25225d0f966e663dbd91d85 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,463 | py | import os
from pathlib import Path
import platform
import subprocess
import pytest
# no need to import make_html_links as we call links.py from CLI!
TMP = Path(os.getenv("TMP", "/tmp"))
SCRIPT = 'links.py'
IS_LOCAL = platform.system() == 'Darwin'
MY_CODE = SCRIPT if IS_LOCAL else TMP / SCRIPT
# https://docs.pytest.org/en/latest/tmpdir.html#the-tmpdir-factory-fixture
@pytest.fixture
def my_file(tmp_path):
f = tmp_path / "some_file.txt"
return f
def _create_and_verify_links(my_file, lines, expected_links):
my_file.write_bytes(b'\n'.join(lines))
cmd = f'cat {my_file.resolve()} | python {MY_CODE}'
output = subprocess.check_output(cmd, shell=True).splitlines()
assert all(link in output for link in expected_links)
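# Illustrative CLI behaviour exercised by these tests (links.py itself is not
# part of this file; the exact invocation is inferred from the expected output):
# $ echo "https://pybit.es/,Our labor of love" | python links.py
# <a href="https://pybit.es/">Our labor of love</a>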
def test_make_html_links_first_data_set(my_file):
lines = [b"https://www.python.org, Python Homepage",
b"bad data,blabla,123",
(b"https://pybit.es/generators.html , "
b"Generators are Awesome "),
b"more bad data"]
expected_links = [(b'<a href="https://www.python.org" target="_blank">'
b'Python Homepage</a>'),
(b'<a href="https://pybit.es/generators.html">'
b'Generators are Awesome</a>')]
_create_and_verify_links(my_file, lines, expected_links)
def test_make_html_links_second_data_set(my_file):
lines = [b"bogus data, again",
b"https://codechalleng.es/bites/ , Bites of Py",
(b"https://stackoverflow.com/a/12927564,How to capture"
b" subprocess.call stdout"),
b"https://pybit.es/,Our labor of love",
b"https://pybit.es/pages/about.html, About Us",
b"https://nu.nl, Dutch news site",
b"And some more bad data !!"]
expected_links = [(b'<a href="https://codechalleng.es/bites/">'
b'Bites of Py</a>'),
(b'<a href="https://stackoverflow.com/a/12927564" '
b'target="_blank">How to capture subprocess.call '
b'stdout</a>'),
b'<a href="https://pybit.es/">Our labor of love</a>',
(b'<a href="https://pybit.es/pages/about.html">'
b'About Us</a>'),
(b'<a href="https://nu.nl" target="_blank">'
b'Dutch news site</a>')]
_create_and_verify_links(my_file, lines, expected_links)
| [
"[email protected]"
] | |
7b6a4d6df8f67ad85480b8a58255130a952e5298 | bc441bb06b8948288f110af63feda4e798f30225 | /monitor_sdk/model/flowable_service/bpmn_end_event_pb2.py | 2ff0cb3c70459b361343271db62a5a44c17ff8f6 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,046 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: bpmn_end_event.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from monitor_sdk.model.flowable_service import bpmn_links_pb2 as monitor__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='bpmn_end_event.proto',
package='flowable_service',
syntax='proto3',
serialized_options=_b('ZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_service'),
serialized_pb=_b('\n\x14\x62pmn_end_event.proto\x12\x10\x66lowable_service\x1a\x33monitor_sdk/model/flowable_service/bpmn_links.proto\"F\n\x0c\x42PMNEndEvent\x12\n\n\x02id\x18\x01 \x01(\t\x12*\n\x05links\x18\x02 \x01(\x0b\x32\x1b.flowable_service.BPMNLinksBLZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_serviceb\x06proto3')
,
dependencies=[monitor__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2.DESCRIPTOR,])
_BPMNENDEVENT = _descriptor.Descriptor(
name='BPMNEndEvent',
full_name='flowable_service.BPMNEndEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flowable_service.BPMNEndEvent.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='links', full_name='flowable_service.BPMNEndEvent.links', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=165,
)
_BPMNENDEVENT.fields_by_name['links'].message_type = monitor__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2._BPMNLINKS
DESCRIPTOR.message_types_by_name['BPMNEndEvent'] = _BPMNENDEVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BPMNEndEvent = _reflection.GeneratedProtocolMessageType('BPMNEndEvent', (_message.Message,), {
'DESCRIPTOR' : _BPMNENDEVENT,
'__module__' : 'bpmn_end_event_pb2'
# @@protoc_insertion_point(class_scope:flowable_service.BPMNEndEvent)
})
_sym_db.RegisterMessage(BPMNEndEvent)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
dbe0605dfc8836252a7be025717a2c025a583ef3 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/iotsecurity/v20210201preview/__init__.py | 4fdb5150fbe12dc9d3eb365657246551620919aa | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .defender_setting import *
from .get_defender_setting import *
from .get_on_premise_sensor import *
from .get_sensor import *
from .get_site import *
from .on_premise_sensor import *
from .sensor import *
from .site import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:iotsecurity/v20210201preview:DefenderSetting":
return DefenderSetting(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:iotsecurity/v20210201preview:OnPremiseSensor":
return OnPremiseSensor(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:iotsecurity/v20210201preview:Sensor":
return Sensor(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:iotsecurity/v20210201preview:Site":
return Site(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "iotsecurity/v20210201preview", _module_instance)
_register_module()
| [
"[email protected]"
] | |
aacd17774b7ca947c61eb332e74e803315d096d0 | 7b60c68ddda39ef82f5d49404bbcf62cc83e4860 | /sleftry/LSTM/one LSTM.py | 0ed0f8660cd59c49d978a0f837371a5b5509b64f | [] | no_license | joycejhang/learningml | da802e0ab9cfb6cce89791561870c0078cfaaaf9 | 884ed0541bcb257bb82e77c126ab77c927fe9add | refs/heads/master | 2020-04-22T15:04:58.445844 | 2019-07-04T11:31:03 | 2019-07-04T11:31:03 | 170,466,049 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,884 | py |
import tokenFile
import numpy as np
# output-layer activation function
def softmax(x):
x = np.array(x)
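# subtracting the maximum before exponentiating keeps np.exp from overflowing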
max_x = np.max(x)
return np.exp(x-max_x) / np.sum(np.exp(x-max_x))
def sigmoid(x):
return 1.0/(1.0 + np.exp(-x))
def tanh(x):
return (np.exp(x) - np.exp(-x))/(np.exp(x) + np.exp(-x))
class myLSTM:
def __init__(self, data_dim, hidden_dim=100):
# data_dim: word-vector dimension, i.e. vocabulary size; hidden_dim: hidden-unit dimension
self.data_dim = data_dim
self.hidden_dim = hidden_dim
# initialize the weight matrices
self.whi, self.wxi, self.bi = self._init_wh_wx()
self.whf, self.wxf, self.bf = self._init_wh_wx()
self.who, self.wxo, self.bo = self._init_wh_wx()
self.wha, self.wxa, self.ba = self._init_wh_wx()
self.wy, self.by = np.random.uniform(-np.sqrt(1.0/self.hidden_dim), np.sqrt(1.0/self.hidden_dim),
(self.data_dim, self.hidden_dim)), \
np.random.uniform(-np.sqrt(1.0/self.hidden_dim), np.sqrt(1.0/self.hidden_dim),
(self.data_dim, 1))
# initialize one set of wh, wx, b
def _init_wh_wx(self):
wh = np.random.uniform(-np.sqrt(1.0/self.hidden_dim), np.sqrt(1.0/self.hidden_dim),
(self.hidden_dim, self.hidden_dim))
wx = np.random.uniform(-np.sqrt(1.0/self.data_dim), np.sqrt(1.0/self.data_dim),
(self.hidden_dim, self.data_dim))
b = np.random.uniform(-np.sqrt(1.0/self.data_dim), np.sqrt(1.0/self.data_dim),
(self.hidden_dim, 1))
return wh, wx, b
# initialize the state arrays
def _init_s(self, T):
iss = np.array([np.zeros((self.hidden_dim, 1))] * (T + 1)) # input gate
fss = np.array([np.zeros((self.hidden_dim, 1))] * (T + 1)) # forget gate
oss = np.array([np.zeros((self.hidden_dim, 1))] * (T + 1)) # output gate
ass = np.array([np.zeros((self.hidden_dim, 1))] * (T + 1)) # current input state
hss = np.array([np.zeros((self.hidden_dim, 1))] * (T + 1)) # hidden state
css = np.array([np.zeros((self.hidden_dim, 1))] * (T + 1)) # cell state
ys = np.array([np.zeros((self.data_dim, 1))] * T) # output value
return {'iss': iss, 'fss': fss, 'oss': oss,
'ass': ass, 'hss': hss, 'css': css,
'ys': ys}
# forward pass for a single input sequence x
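# The forward pass below implements the standard LSTM cell equations:
#   i_t = sigmoid(Whi . h_{t-1} + Wxi . x_t + bi)   input gate
#   f_t = sigmoid(Whf . h_{t-1} + Wxf . x_t + bf)   forget gate
#   o_t = sigmoid(Who . h_{t-1} + Wxo . x_t + bo)   output gate
#   a_t = tanh(Wha . h_{t-1} + Wxa . x_t + ba)      candidate input state
#   c_t = f_t * c_{t-1} + i_t * a_t                 cell state
#   h_t = o_t * tanh(c_t)                           hidden state
#   y_t = softmax(Wy . h_t + by)                    output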
def forward(self, x):
# sequence length
T = len(x)
# initialize the state arrays for this sequence
stats = self._init_s(T)
for t in range(T):
# hidden state from the previous time step
ht_pre = np.array(stats['hss'][t-1]).reshape(-1, 1)
# input gate
stats['iss'][t] = self._cal_gate(self.whi, self.wxi, self.bi, ht_pre, x[t], sigmoid)
# forget gate
stats['fss'][t] = self._cal_gate(self.whf, self.wxf, self.bf, ht_pre, x[t], sigmoid)
# output gate
stats['oss'][t] = self._cal_gate(self.who, self.wxo, self.bo, ht_pre, x[t], sigmoid)
# current input state
stats['ass'][t] = self._cal_gate(self.wha, self.wxa, self.ba, ht_pre, x[t], tanh)
# cell state, ct = ft * ct_pre + it * at
stats['css'][t] = stats['fss'][t] * stats['css'][t-1] + stats['iss'][t] * stats['ass'][t]
# hidden state, ht = ot * tanh(ct)
stats['hss'][t] = stats['oss'][t] * tanh(stats['css'][t])
# output value, yt = softmax(self.wy.dot(ht) + self.by)
stats['ys'][t] = softmax(self.wy.dot(stats['hss'][t]) + self.by)
return stats
# compute the output of one gate
def _cal_gate(self, wh, wx, b, ht_pre, x, activation):
return activation(wh.dot(ht_pre) + wx[:, x].reshape(-1,1) + b)
# predict outputs for a single input sequence x
def predict(self, x):
stats = self.forward(x)
pre_y = np.argmax(stats['ys'].reshape(len(x), -1), axis=1)
return pre_y
# compute the loss: softmax cross-entropy, where (x, y) are multiple samples
def loss(self, x, y):
cost = 0
for i in xrange(len(y)):
stats = self.forward(x[i])
# take the predicted probability of the target word at each time step of y[i]
pre_yi = stats['ys'][xrange(len(y[i])), y[i]]
cost -= np.sum(np.log(pre_yi))
# count the total number of words in y and compute the average loss
N = np.sum([len(yi) for yi in y])
ave_loss = cost / N
return ave_loss
# initialize the gradients dwh, dwx, db
def _init_wh_wx_grad(self):
dwh = np.zeros(self.whi.shape)
dwx = np.zeros(self.wxi.shape)
db = np.zeros(self.bi.shape)
return dwh, dwx, db
# compute gradients for a single sample (x, y)
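# Backpropagation through time: starting from delta_o = y_hat - y (the
# gradient of softmax cross-entropy), the per-gate deltas below follow the
# chain rule through h_t = o_t * tanh(c_t) and c_t = f_t * c_{t-1} + i_t * a_t;
# delta_ct is accumulated across time steps.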
def bptt(self, x, y):
dwhi, dwxi, dbi = self._init_wh_wx_grad()
dwhf, dwxf, dbf = self._init_wh_wx_grad()
dwho, dwxo, dbo = self._init_wh_wx_grad()
dwha, dwxa, dba = self._init_wh_wx_grad()
dwy, dby = np.zeros(self.wy.shape), np.zeros(self.by.shape)
# initialize delta_ct; it is accumulated during backpropagation
delta_ct = np.zeros((self.hidden_dim, 1))
# forward pass
stats = self.forward(x)
# derivative of the objective with respect to the output y
delta_o = stats['ys']
delta_o[np.arange(len(y)), y] -= 1
for t in np.arange(len(y))[::-1]:
# gradients of the output layer wy, by; all time steps share the output weights, so accumulate over time
dwy += delta_o[t].dot(stats['hss'][t].reshape(1, -1))
dby += delta_o[t]
# derivative of the objective with respect to the hidden state
delta_ht = self.wy.T.dot(delta_o[t])
# derivatives of the gates and the cell state
delta_ot = delta_ht * tanh(stats['css'][t])
delta_ct += delta_ht * stats['oss'][t] * (1-tanh(stats['css'][t])**2)
delta_it = delta_ct * stats['ass'][t]
delta_ft = delta_ct * stats['css'][t-1]
delta_at = delta_ct * stats['iss'][t]
delta_at_net = delta_at * (1-stats['ass'][t]**2)
delta_it_net = delta_it * stats['iss'][t] * (1-stats['iss'][t])
delta_ft_net = delta_ft * stats['fss'][t] * (1-stats['fss'][t])
delta_ot_net = delta_ot * stats['oss'][t] * (1-stats['oss'][t])
# update the weight-matrix gradients; weights are shared across time steps, so accumulate
dwhf, dwxf, dbf = self._cal_grad_delta(dwhf, dwxf, dbf, delta_ft_net, stats['hss'][t-1], x[t])
dwhi, dwxi, dbi = self._cal_grad_delta(dwhi, dwxi, dbi, delta_it_net, stats['hss'][t-1], x[t])
dwha, dwxa, dba = self._cal_grad_delta(dwha, dwxa, dba, delta_at_net, stats['hss'][t-1], x[t])
dwho, dwxo, dbo = self._cal_grad_delta(dwho, dwxo, dbo, delta_ot_net, stats['hss'][t-1], x[t])
return [dwhf, dwxf, dbf,
dwhi, dwxi, dbi,
dwha, dwxa, dba,
dwho, dwxo, dbo,
dwy, dby]
# accumulate the gradients for one weight set
def _cal_grad_delta(self, dwh, dwx, db, delta_net, ht_pre, x):
dwh += delta_net * ht_pre
dwx += delta_net * x
db += delta_net
return dwh, dwx, db
# compute gradients and update weights for a single sample (x, y)
def sgd_step(self, x, y, learning_rate):
dwhf, dwxf, dbf, \
dwhi, dwxi, dbi, \
dwha, dwxa, dba, \
dwho, dwxo, dbo, \
dwy, dby = self.bptt(x, y)
# update the weight matrices
self.whf, self.wxf, self.bf = self._update_wh_wx(learning_rate, self.whf, self.wxf, self.bf, dwhf, dwxf, dbf)
self.whi, self.wxi, self.bi = self._update_wh_wx(learning_rate, self.whi, self.wxi, self.bi, dwhi, dwxi, dbi)
self.wha, self.wxa, self.ba = self._update_wh_wx(learning_rate, self.wha, self.wxa, self.ba, dwha, dwxa, dba)
self.who, self.wxo, self.bo = self._update_wh_wx(learning_rate, self.who, self.wxo, self.bo, dwho, dwxo, dbo)
self.wy, self.by = self.wy - learning_rate * dwy, self.by - learning_rate * dby
# update one set of weight matrices
def _update_wh_wx(self, learning_rate, wh, wx, b, dwh, dwx, db):
wh -= learning_rate * dwh
wx -= learning_rate * dwx
b -= learning_rate * db
return wh, wx, b
# train the LSTM
def train(self, X_train, y_train, learning_rate=0.005, n_epoch=5):
losses = []
num_examples = 0
for epoch in xrange(n_epoch):
for i in xrange(len(y_train)):
self.sgd_step(X_train[i], y_train[i], learning_rate)
num_examples += 1
loss = self.loss(X_train, y_train)
losses.append(loss)
print('epoch {0}: loss = {1}'.format(epoch+1, loss))
if len(losses) > 1 and losses[-1] > losses[-2]:
learning_rate *= 0.5
print('decrease learning_rate to', learning_rate)
# load the data
file_path = r'/home/display/pypys/practices/rnn/results-20170508-103637.csv'
dict_size = 8000
myTokenFile = tokenFile.tokenFile2vector(file_path, dict_size)
X_train, y_train, dict_words, index_of_words = myTokenFile.get_vector()
# train the LSTM
lstm = myLSTM(dict_size, hidden_dim=100)
lstm.train(X_train[:200], y_train[:200],
learning_rate=0.005,
n_epoch=3) | [
"[email protected]"
] | |
38208982c2d014d233c50e8817e0a80f71c021ff | 2f963d7989749037a3ec27aaa39b31416b33cbb2 | /ib_users/views/get_user/get_user.py | 183ff7a74ce5cd00bbb88a71c9efbd66b445cd96 | [] | no_license | migsantos121/phd3-backend | 3cd014908856c995de3c4473d82059bc9c1b5794 | 9d1d2bd6f55dc89719ce5a1916c5db3d573aec1e | refs/heads/master | 2022-12-12T17:25:59.334509 | 2020-03-09T09:24:08 | 2020-03-09T09:24:08 | 245,991,086 | 0 | 0 | null | 2022-06-28T14:45:50 | 2020-03-09T09:17:18 | Python | UTF-8 | Python | false | false | 1,711 | py | def get_user(*args, **kwargs):
"""
Note: replace below mock implementation with your actual implementation
Request:
kwargs["user"] -> request user
kwargs["request_object"] -> request body type object
kwargs["request_data"] -> request body data dict
kwargs["request_headers_obj"] -> request headers object
kwargs["request_query_params"] -> request query parameters object
Response :
return: tuple(response_status_code, response_object, response_headers_object)
from django_swagger_utils.drf_server.utils.server_gen.endpoint_response import endpoint_response
return endpoint_response(response_object)
"""
# mock response implementation starts
# from ib_users.views.get_user.tests.test_case_01 import test_case
# from django_swagger_utils.drf_server.utils.server_gen.mock_response import mock_response
# response_tuple = mock_response(app_name="ib_users", operation_name="get_user", test_case=test_case,
# kwargs=kwargs)
# end of mock view implementation
user = kwargs["user"]
request_data = kwargs['request_data']
source = args[0].META.get('HTTP_X_SOURCE', '')
if not source:
source = args[0].META.get('HTTP_SOURCE', '')
from ib_users.models.ib_user import IBUser
response_object = IBUser.get_user_details(user, source)
print response_object
# uncomment below lines for actual implementation when you have response_object / response_data
from django_swagger_utils.drf_server.utils.server_gen.endpoint_response import endpoint_response
response_tuple = endpoint_response(response_object)
return response_tuple
| [
"[email protected]"
] | |
9d8cc9f43fad40403b595b3943f0646b153de858 | 4d0f3e2d7455f80caea978e4e70621d50c6c7561 | /MongoDB/BigData_Bulk/Update_unordered.py | 0879aac9fe0ae108a62abacca7d7a3a7501a65e4 | [] | no_license | mhdr/PythonSamples | 66940ee2353872d2947c459e3865be42140329c6 | 1a9dccc05962033ea02b081a39cd67c1e7b29d0c | refs/heads/master | 2020-04-14T01:10:13.033940 | 2016-05-28T15:33:52 | 2016-05-28T15:33:52 | 30,691,539 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | # http://api.mongodb.org/python/current/examples/bulk.html
__author__ = 'Mahmood'
import time
from pymongo import MongoClient
from bson.objectid import ObjectId
client=MongoClient()
# drop last db
client.drop_database("test-db")
# database
db=client["test-db"]
# table
people_bulk=db["People"].initialize_unordered_bulk_op()
people=db["People"]
print("Starting insert loop : {0}".format(time.asctime( time.localtime(time.time()) )))
for i in range(1,100000):
new_person={"FirstName" : "Mahmood",
"LastName" : "Ramzani",
"Gender": "Male",
"BirthDate":{"Year":1985,"Month":5,"Day":22},
"Country":"Iran",
"City":"Rasht",
"email":"[email protected]",
"user_name":"mahmoodramzani",
"password":"1234"}
ids=people_bulk.insert(new_person)
print("End of insert loop : {0}".format(time.asctime( time.localtime(time.time()) )))
print("Starting execute : {0}".format(time.asctime( time.localtime(time.time()) )))
people_bulk.execute()
print("End of execute : {0}".format(time.asctime( time.localtime(time.time()) )))
matches=people.find()
# initialize again because execute() can only be run once per bulk object
people_bulk=db["People"].initialize_ordered_bulk_op()
print("Starting update loop: {0}".format(time.asctime( time.localtime(time.time()) )))
for match in matches:
match["LastName"]="Ramzani Sesemasi"
id=match["_id"]
people_bulk.find({"_id":id}).update({"$set":match})
print("End of update loop: {0}".format(time.asctime( time.localtime(time.time()) )))
print("Starting execute : {0}".format(time.asctime( time.localtime(time.time()) )))
people_bulk.execute()
print("End of execute : {0}".format(time.asctime( time.localtime(time.time()) ))) | [
"[email protected]"
] | |
abb9e8d07146fd93787a23e3c00f4f035f2e6a3c | d767a2048c050421e7213be2ecccff09014e270e | /Day 24/Set Intersection(Hackerrank).py | 86fd4076cac1c920fcd2f36da4bae154bb48429b | [] | no_license | Benson1198/31-Days-of-CP | 23ff16f9899d37e2ca9a1eba81a87b521233fd2f | 0e5de1d0b4e1d4811fb096455de951f37c3d69d0 | refs/heads/master | 2022-09-18T22:26:53.178381 | 2020-06-03T14:20:41 | 2020-06-03T14:20:41 | 260,527,724 | 2 | 1 | null | 2020-05-04T17:36:36 | 2020-05-01T18:15:21 | Python | UTF-8 | Python | false | false | 173 | py | for i in range(int(input())):
a = set(map(int,input().split()))
break
for i in range(int(input())):
b = set(map(int,input().split()))
break
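# a & b is the set intersection; its size is the number of common elements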
print(len(a&b)) | [
"[email protected]"
] | |
22bbe05b376afb7261d2ccc0c655fab46d2139f9 | d42a9128898d504a9831f1afee3198c4677236c9 | /Level_2/카펫.py | 7590489b9346b70f2da4fc35051fa1209028eb67 | [] | no_license | ketkat001/Programmers-coding | 6848a9c8cffd97b792cfc8856ec135b72af5d688 | 799baba8d66a9971b43233d231cecbf262b4ea27 | refs/heads/master | 2023-09-02T23:07:25.614820 | 2021-10-17T18:12:02 | 2021-10-17T18:12:02 | 235,016,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | def solution(brown, yellow):
answer = []
for i in range(yellow, 0, -1):
if yellow % i != 0:
continue
if (2*i) + (2*(yellow//i)) + 4 == brown:
answer.append(i+2)
answer.append(yellow//i+2)
break
return answer
print(solution(10, 2)) | [
"[email protected]"
] | |
917503f915820aedab3034d4fbbbcb034b44f88e | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week02/2/singupshimo_20200705174440.py | ce44289871c2554146760f435e3113f67b10850c | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 73 | py | from selenium import webdriver
import time
try:
browser = webdriver. | [
"[email protected]"
] | |
c3d4e81a3c0d45e1f41190732262efc7428078ae | 5f1ce3d168695f38f8fec53ab56d464042fbcbae | /meraki_v1/api/insight.py | 48f25659153929344e3df744090bf80d0ca950b4 | [
"MIT"
] | permissive | fsandberg/dashboard-api-python | e209b927f1b5d1b29a76e36def1587b873764b0f | c01ff038643a39bd12660d2719375eeb05c7ba24 | refs/heads/master | 2022-06-23T06:25:37.599413 | 2020-05-11T16:47:31 | 2020-05-11T16:47:31 | 263,100,991 | 0 | 0 | MIT | 2020-05-11T16:43:14 | 2020-05-11T16:43:13 | null | UTF-8 | Python | false | false | 4,514 | py | class Insight(object):
def __init__(self, session):
super(Insight, self).__init__()
self._session = session
def getOrganizationInsightMonitoredMediaServers(self, organizationId: str):
"""
**List the monitored media servers for this organization. Only valid for organizations with Meraki Insight.**
https://developer.cisco.com/docs/meraki-api-v1/#!get-organization-insight-monitored-media-servers
- organizationId (string)
"""
metadata = {
'tags': ['insight', 'configure', 'monitoredMediaServers'],
'operation': 'getOrganizationInsightMonitoredMediaServers',
}
resource = f'/organizations/{organizationId}/insight/monitoredMediaServers'
return self._session.get(metadata, resource)
def createOrganizationInsightMonitoredMediaServer(self, organizationId: str, name: str, address: str):
"""
**Add a media server to be monitored for this organization. Only valid for organizations with Meraki Insight.**
https://developer.cisco.com/docs/meraki-api-v1/#!create-organization-insight-monitored-media-server
- organizationId (string)
- name (string): The name of the VoIP provider
- address (string): The IP address (IPv4 only) or hostname of the media server to monitor
"""
kwargs = locals()
metadata = {
'tags': ['insight', 'configure', 'monitoredMediaServers'],
'operation': 'createOrganizationInsightMonitoredMediaServer',
}
resource = f'/organizations/{organizationId}/insight/monitoredMediaServers'
body_params = ['name', 'address']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationInsightMonitoredMediaServer(self, organizationId: str, monitoredMediaServerId: str):
"""
**Return a monitored media server for this organization. Only valid for organizations with Meraki Insight.**
https://developer.cisco.com/docs/meraki-api-v1/#!get-organization-insight-monitored-media-server
- organizationId (string)
- monitoredMediaServerId (string)
"""
metadata = {
'tags': ['insight', 'configure', 'monitoredMediaServers'],
'operation': 'getOrganizationInsightMonitoredMediaServer',
}
resource = f'/organizations/{organizationId}/insight/monitoredMediaServers/{monitoredMediaServerId}'
return self._session.get(metadata, resource)
def updateOrganizationInsightMonitoredMediaServer(self, organizationId: str, monitoredMediaServerId: str, **kwargs):
"""
**Update a monitored media server for this organization. Only valid for organizations with Meraki Insight.**
https://developer.cisco.com/docs/meraki-api-v1/#!update-organization-insight-monitored-media-server
- organizationId (string)
- monitoredMediaServerId (string)
- name (string): The name of the VoIP provider
- address (string): The IP address (IPv4 only) or hostname of the media server to monitor
"""
kwargs.update(locals())
metadata = {
'tags': ['insight', 'configure', 'monitoredMediaServers'],
'operation': 'updateOrganizationInsightMonitoredMediaServer',
}
resource = f'/organizations/{organizationId}/insight/monitoredMediaServers/{monitoredMediaServerId}'
body_params = ['name', 'address']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganizationInsightMonitoredMediaServer(self, organizationId: str, monitoredMediaServerId: str):
"""
**Delete a monitored media server from this organization. Only valid for organizations with Meraki Insight.**
https://developer.cisco.com/docs/meraki-api-v1/#!delete-organization-insight-monitored-media-server
- organizationId (string)
- monitoredMediaServerId (string)
"""
metadata = {
'tags': ['insight', 'configure', 'monitoredMediaServers'],
'operation': 'deleteOrganizationInsightMonitoredMediaServer',
}
resource = f'/organizations/{organizationId}/insight/monitoredMediaServers/{monitoredMediaServerId}'
return self._session.delete(metadata, resource)
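# Minimal usage sketch (assumes an authenticated meraki_v1 RestSession named
# `session`; the organization id below is made up for illustration):
# insight = Insight(session)
# servers = insight.getOrganizationInsightMonitoredMediaServers('123456')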
| [
"[email protected]"
] | |
0096083f9246d661a3f54e1a514347e517ac5392 | c074fb834cb4a8ac75d107146df10f9496590792 | /shows/migrations/0013_auto_20200924_0651.py | 48e9ac2712f64fd60afd0c97b326d8e90fc0b462 | [
"Unlicense"
] | permissive | jmhubbard/quote_of_the_day_custom_user | 4d5ffd4183d7e6290161b84cae2aa1f7ad621a99 | 27024b2953c1c94fd2970563c3ab31ad444912b6 | refs/heads/master | 2023-02-19T00:59:27.372671 | 2021-01-10T02:45:56 | 2021-01-10T02:45:56 | 293,443,918 | 1 | 0 | Unlicense | 2020-12-03T17:59:59 | 2020-09-07T06:41:25 | Python | UTF-8 | Python | false | false | 363 | py | # Generated by Django 3.1.1 on 2020-09-24 06:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shows', '0012_remove_episode_number'),
]
operations = [
migrations.AlterUniqueTogether(
name='episode',
unique_together={('name', 'season', 'show')},
),
]
| [
"[email protected]"
] | |
5d77eb09e4b6cf34c16b1f1dd27fa8980347f031 | cb30d1a3a4fa6c8f7a6f89a671fbdb4a808e19e3 | /c3/func-name.py | 68f47dd1374713f06b70d7b37a580130d9bbab0d | [] | no_license | systemchip/python-for-everyone | 0b45172ca5b41c3b5fc1a835fbccf4a479c282ea | 9fb7f751a97fb6a110079e1e3e1dd9601fb24374 | refs/heads/master | 2021-09-02T09:18:22.013704 | 2017-07-17T07:46:19 | 2017-07-17T07:46:19 | 115,913,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | # define a function
def calcTime(dist, speed):
t = dist / speed
t = round(t, 1)
return t
# normal call --- (*1)
print( calcTime(500, 100) )
# call using keyword arguments --- (*2)
print( calcTime(dist=500, speed=100) )
| [
"[email protected]"
] | |
1f320c3de6e2963e75238e0f5091347cc938b7a7 | 85ac9f05432a2a4299cb43969395fd7865e78267 | /entrega1/src/dump_movies_info.py | b32c447efa6d015870e60b43c41c52b8932cb071 | [] | no_license | pablodanielrey/twss | 72d8056c2f3fd2a70d465d3176802dbc019fd022 | b533fa6e0ea86460d8ccb49ec554a6f6e7ab4352 | refs/heads/master | 2023-05-20T03:06:23.078921 | 2021-06-12T23:31:13 | 2021-06-12T23:31:13 | 352,428,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | """
utility script to build a JSON file with flat info about each movie.
for debugging and verification only.
processes the merge file.
"""
import json
import datetime
from common import Merge, MergeInfo, Movie, Show
if __name__ == '__main__':
movies = []
with open('data/merged.json', 'r') as f:
merges = json.loads(f.read())
_movies = merges[Merge.MOVIES.value]
for m in _movies:
m['SHOWS'] = [s for s in merges[Merge.SHOWS.value] if s[Show.MOVIE.value] == m[Movie.ID.value]]
m['MERGES'] = [s[MergeInfo.MOVIES.value] for s in merges[Merge.MERGES.value] if s[MergeInfo.NEW_ID.value] == m[Movie.ID.value]]
movies.append(m)
dumped = {
'DATE': str(datetime.datetime.utcnow()),
'MOVIES': movies
}
with open('data/dumped.json', 'w') as f:
f.write(json.dumps(dumped, ensure_ascii=False)) | [
"[email protected]"
] | |
4c6392fa5873487af848e8d1467ac221f4033f0b | 2d1769af5eee0c764e1a917fca7a0f58c8751a13 | /cnn/faces/face_utils.py | 73aa05568ee350ac35e6283d6bcfd48a4e504288 | [] | no_license | m-learning/tensorflow_ann_modules | aca785991ba9700da0a1e7a70cce9f32fc6ac6e9 | 914a9fb2c6a4400ea4b7643e3f3fd6aac5f94f8d | refs/heads/master | 2020-05-22T03:59:18.650205 | 2017-08-27T14:34:16 | 2017-08-27T14:34:16 | 61,343,012 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,101 | py | """
Created on Jan 12, 2017
Utility module for FaceNet model
@author: Levan Tsinadze
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from scipy import misc
from cnn.faces import detect_face, facenet
import numpy as np
import tensorflow as tf
GRAPH_FILE = 'face_embeddings.pb'
INPUT_NAME = 'input'
INPUT_LAYER = 'input:0'
TRAIN_LAYER = 'phase_train:0'
EMBEDDINGS_LAYER = 'embeddings:0'
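# Illustrative use of the tensor names above (a sketch; assumes the frozen
# graph from GRAPH_FILE has already been loaded into the session `sess`):
# emb = sess.run(EMBEDDINGS_LAYER,
#                feed_dict={INPUT_LAYER: images, TRAIN_LAYER: False})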
def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction, _files):
"""Loads and alighn face images from files
Args:
image_paths - image file paths
image_size - image size
margin - margin for alignment
gpu_memory_fraction - GPU memory fraction for parallel processing
Returns:
images - aligned images from files
"""
minsize = 20 # minimum size of face
threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold
factor = 0.709 # scale factor
print('Creating networks and loading parameters')
with tf.Graph().as_default() as g:
sess = tf.Session(graph=g, config=tf.ConfigProto(log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, _files.model_dir)
nrof_samples = len(image_paths)
img_list = [None] * nrof_samples
for i in xrange(nrof_samples):
img = misc.imread(os.path.expanduser(image_paths[i]))
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
det = np.squeeze(bounding_boxes[0, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
prewhitened = facenet.prewhiten(aligned)
img_list[i] = prewhitened
images = np.stack(img_list)
return images | [
"[email protected]"
] | |
ada1387065207483a988baa6ad1f33206c2dd449 | e4066b34668bbf7fccd2ff20deb0d53392350982 | /project_scrapy/spiders/dulux.py | 34f2fba74f7d2b55e236d9094a345fdac84c6e22 | [] | no_license | sushma535/WebSites | 24a688b86e1c6571110f20421533f0e7fdf6e1a8 | 16a3bfa44e6c7e22ae230f5b336a059817871a97 | refs/heads/master | 2023-08-18T09:09:16.052555 | 2021-10-11T00:41:50 | 2021-10-11T00:41:50 | 415,621,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | import scrapy
from scrapy.crawler import CrawlerProcess
import os
import csv
from csv import reader
import re
total_data = {}
class SimilarWeb(scrapy.Spider):
name = 'SW'
user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
start_urls = ['https://www.dulux.com.au/', 'https://www.similarsites.com/site/dulux.com.au/']
csv_columns = ['Category', 'Description', 'Name', 'Url']
csv_file = 'websites1_data.csv'
count = 0
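# count tracks how many of the two start URLs have been parsed; the CSV row
# is written only after both responses have contributed their fields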
def parse(self, response):
data, desc, cat = '', '', ''
print('response url:', response.url)
if response.url == self.start_urls[0]:
data = response.css('title::text').get()
if data:
data = re.sub("\n\t\t", '', data)
total_data['Name'] = data
self.count += 1
elif response.url == self.start_urls[1]:
cat = response.css(
'div[class="StatisticsCategoriesDistribution__CategoryTitle-fnuckk-6 jsMDeK"]::text').getall()
desc = response.css('div[class="SiteHeader__Description-sc-1ybnx66-8 hhZNQm"]::text').get()
if cat:
cat = ": ".join(cat[:])
total_data['Category'] = cat
total_data['Description'] = desc
total_data['Url'] = self.start_urls[0]
self.count += 1
if self.count == 2:
print("total data", total_data)
new_data = [total_data['Category'], total_data['Description'], total_data['Name'],
total_data['Url']]
print("new data", new_data)
self.row_appending_to_csv_file(new_data)
def row_appending_to_csv_file(self, data):
if os.path.exists(self.csv_file):
need_to_add_headers = False
with open(self.csv_file, 'a+', newline='') as file:
file.seek(0)
csv_reader = reader(file)
if len(list(csv_reader)) == 0:
need_to_add_headers = True
csv_writer = csv.writer(file)
if need_to_add_headers:
csv_writer.writerow(self.csv_columns)
csv_writer.writerow(data)
else:
with open(self.csv_file, 'w', newline='') as file:
csv_writer = csv.writer(file)
csv_writer.writerow(self.csv_columns) # header
csv_writer.writerow(data)
process = CrawlerProcess()
process.crawl(SimilarWeb)
process.start()
| [
"[email protected]"
] | |
c5b11a0dd5db5c6fdf0dfbc2ffa87ba1ab6f03e7 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /WxkFoXTLYiAq57uDq_6.py | ef17e6cb7e3ba1e66c2d2c4130f086ae67aa0da7 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,880 | py | """
The insurance guy calls. They were about to pay you all that fortune you've
been anxiously waiting for, but they detected further irregularities; the list
of stolen items is misformatted and appears to contain other entries that
don't belong there. Find and remove them.
You receive a dict with nested dicts with `strings` as values. Convert their
values to `number` and return a dict with entries that evaluate to type `int`.
### Examples
find_and_remove({
"workshop": {
"bedsheets": "2000",
"working": "v0g89t7t",
"pen": "370",
"movies": "wo1a3d5d",
},
}), {
"workshop": {
"bedsheets": 2000,
"pen": 370
}
}
find_and_remove({
"bedroom": {
"slippers": "10000",
"piano": "5500",
"call": "vet",
"travel": "world",
},
}), {
"bedroom": {
"slippers": 10000,
"piano": 5500,
},
}
### Notes
* This challenge was translated from Miguel Carvalho's JavaScript Burglary Series. The following are links to his Javascript series:
* If you have suggestions on how to present or further test this challenge please leave a comment.
* This series is part of a [collection that focuses on objects](https://edabit.com/collection/6NzWEMSwrSw4fnKkL). If you are interested in following the breath-taking narrative skills of yours truly or just do some object focused challenges (the challenges are ordered in ascending difficulty order), you can more easily [do that here](https://edabit.com/collection/6NzWEMSwrSw4fnKkL).
"""
def find_and_remove(dct):
    for room, items in dct.items():
        for item, price in items.items():
            try:
                dct[room][item] = int(price)
            except (TypeError, ValueError):
                dct[room][item] = -1  # sentinel for values that are not numeric
        dct[room] = {k: v for k, v in items.items() if v > 0}  # drop the sentinels
    return dct
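# Quick check against the first example from the prompt:
# find_and_remove({
#     "workshop": {
#         "bedsheets": "2000",
#         "working": "v0g89t7t",
#         "pen": "370",
#         "movies": "wo1a3d5d",
#     },
# })  # -> {'workshop': {'bedsheets': 2000, 'pen': 370}}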
| [
"[email protected]"
] | |
800ad7f7ceb5c6d0b024288fbf63c40b291b9cf8 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit_noisy398.py | 211c288f57aab268aa0776e4eb56d9b981a0e392 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,510 | py | # qubit number=3
# total number=78
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
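# Quick sanity checks for the two helpers (as used below, b and the dot
# product are single characters):
#   bitwise_dot("011", "110") == "1"   # 0*1 + 1*1 + 1*0 = 1 (mod 2)
#   bitwise_xor("1", "1") == "0"
# Note that bitwise_xor reverses its result; for the single-character
# strings used in this benchmark that has no effect.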
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.h(input_qubit[2]) # number=59
prog.cz(input_qubit[0],input_qubit[2]) # number=60
prog.h(input_qubit[2]) # number=61
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[2]) # number=48
prog.cz(input_qubit[0],input_qubit[2]) # number=49
prog.h(input_qubit[2]) # number=50
prog.h(input_qubit[2]) # number=71
prog.cz(input_qubit[0],input_qubit[2]) # number=72
prog.h(input_qubit[2]) # number=73
prog.x(input_qubit[2]) # number=55
prog.h(input_qubit[2]) # number=67
prog.cz(input_qubit[0],input_qubit[2]) # number=68
prog.h(input_qubit[2]) # number=69
prog.h(input_qubit[2]) # number=64
prog.cz(input_qubit[0],input_qubit[2]) # number=65
prog.h(input_qubit[2]) # number=66
prog.h(input_qubit[2]) # number=75
prog.cz(input_qubit[0],input_qubit[2]) # number=76
prog.h(input_qubit[2]) # number=77
prog.h(input_qubit[2]) # number=51
prog.cz(input_qubit[0],input_qubit[2]) # number=52
prog.h(input_qubit[2]) # number=53
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.rx(2.3310617489636263,input_qubit[2]) # number=58
prog.x(input_qubit[2]) # number=74
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.x(input_qubit[1]) # number=62
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.rx(-0.9173450548482197,input_qubit[1]) # number=57
prog.cx(input_qubit[2],input_qubit[1]) # number=63
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.cx(input_qubit[2],input_qubit[1]) # number=70
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_noisy398.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"[email protected]"
] | |
55a8ce672f93c1ac8c14ad9db132aa3cb25e038f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /EYojuPCtvSzF2chkZ_24.py | 1b12d28fa0ef8d5bd219097ad16e1cf7a8231166 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | """
Create a function that returns the selected **filename** from a path. Include
the **extension** in your answer.
### Examples
get_filename("C:/Projects/pil_tests/ascii/edabit.txt") ➞ "edabit.txt"
get_filename("C:/Users/johnsmith/Music/Beethoven_5.mp3") ➞ "Beethoven_5.mp3"
get_filename("ffprobe.exe") ➞ "ffprobe.exe"
### Notes
* Tests will include both absolute and relative paths.
* For simplicity, all paths will include forward slashes.
"""
def get_filename(path):
return path.split("/")[-1]
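# Examples from the prompt, runnable as a quick self-test:
if __name__ == "__main__":
    assert get_filename("C:/Projects/pil_tests/ascii/edabit.txt") == "edabit.txt"
    assert get_filename("C:/Users/johnsmith/Music/Beethoven_5.mp3") == "Beethoven_5.mp3"
    assert get_filename("ffprobe.exe") == "ffprobe.exe"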
| [
"[email protected]"
] | |
6289659853a3f52588b54dee16761d5fd5d92783 | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/1061-Lexicographically-Smallest-Equivalent-String/soln.py | e5984b3c6419925571e515793f7e0abedf137ee7 | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 578 | py | class Solution:
    def smallestEquivalentString(self, A: str, B: str, S: str) -> str:
        import string  # pre-imported in the LeetCode environment; made explicit so the snippet runs standalone
        parents = {ch: ch for ch in string.ascii_lowercase}
def find(x):
if x == parents[x]:
return parents[x]
parents[x] = find(parents[x])
return parents[x]
def unite(x, y):
rx, ry = find(x), find(y)
if rx > ry:
rx, ry = ry, rx
# rx < ry
parents[ry] = parents[rx]
for a, b in zip(A, B):
unite(a, b)
return ''.join(map(find, S))
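# Standalone sketch of the intended behaviour (LeetCode's canonical example):
#   Solution().smallestEquivalentString("parker", "morris", "parser")
# returns "makkek" -- the pairs p~m, a~o, k~i, e~s merge equivalence
# classes, and find() maps every character to the smallest member.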
| [
"[email protected]"
] | |
71ad0515fca562d7f5067aec818dc3aa4556943e | 561e84bcf8e81e325795a7f917eda62fa850f23e | /tests/Mujoco/multi_plot_loss.py | 6629eec90d2de05b8a6341404e6897bbe30888d1 | [] | no_license | maxiaoba/SoftAC | e397de3c82f7c6c3c97c7a9e483bc9f669e5a07e | 668c91511fa5b1a77676197115561eb7a8ecf5c5 | refs/heads/master | 2020-05-23T12:16:22.721219 | 2020-03-19T04:44:39 | 2020-03-19T04:44:39 | 186,754,090 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,744 | py | import csv
import os.path
import matplotlib
matplotlib.rcParams.update({'font.size': 10})
from matplotlib import pyplot as plt
import numpy as np
itr_interval = 10
max_itr = 2e4
fields = [
'return-average',
# 'vf--avg',
# 'vf1-avg',
# 'vf2-avg',
# 'log-pi-mean',
# 'mean-sq-bellman-error1',
]
itr_name = 'epoch'
min_loss = [-np.inf,-np.inf,-np.inf,-np.inf,-np.inf]
max_loss = [np.inf,np.inf,np.inf,np.inf,np.inf]
exp_name = ["Hopper","Ant","Walker2d","HalfCheetah","Humanoid",
"Swimmer","Reacher","SwimmerRllab","HumanoidRllab",
"InvertedDoublePendulum"][3]
prepath = "./Data/"+exp_name
plot_path = "./Data/"+exp_name
policies = [
"SAC_Gaussiansr5.0",
"FlowQ7_Gaussiansr5.0cg10.0",
"FlowQ7_Gaussiansr5.0cg100.0",
"FlowQ7_Gaussiansr5.0cg1000.0",
]
policy_names = policies
colors = []
for pid in range(len(policies)):
colors.append('C'+str(pid))
extra_name = 'FlowQsr'
pre_name = ''
post_name = ''
plot_name = extra_name
for fid,field in enumerate(fields):
print(field)
fig = plt.figure(fid,figsize=(5,5))
legends = []
plts = []
for (policy_index,policy) in enumerate(policies):
policy_path = pre_name+policy+post_name
Itrs = []
Losses = []
min_itr = np.inf
for trial in range(3):
file_path = prepath+'/'+policy_path+'/'+'seed'+str(trial)+'/process.csv'
print(file_path)
if os.path.exists(file_path):
print(policy+'_'+str(trial))
itrs = []
losses = []
loss = []
with open(file_path) as csv_file:
if '\0' in open(file_path).read():
print("you have null bytes in your input file")
csv_reader = csv.reader(x.replace('\0', '') for x in csv_file)
else:
csv_reader = csv.reader(csv_file, delimiter=',')
for (i,row) in enumerate(csv_reader):
if i == 0:
entry_dict = {}
for index in range(len(row)):
entry_dict[row[index]] = index
# print(entry_dict)
else:
                            itr = i - 1  # int(float(row[entry_dict[itr_name]]))
if itr > max_itr:
break
loss.append(np.clip(float(row[entry_dict[field]]),
min_loss[fid],max_loss[fid]))
if itr % itr_interval == 0:
itrs.append(itr)
loss = np.mean(loss)
losses.append(loss)
loss = []
if len(losses) < min_itr:
min_itr = len(losses)
Losses.append(losses)
Losses = [losses[:min_itr] for losses in Losses]
itrs = itrs[:min_itr]
Losses = np.array(Losses)
print(Losses.shape)
y = np.mean(Losses,0)
yerr = np.std(Losses,0)
plot, = plt.plot(itrs,y,colors[policy_index])
plt.fill_between(itrs,y+yerr,y-yerr,linewidth=0,
facecolor=colors[policy_index],alpha=0.3)
plts.append(plot)
legends.append(policy_names[policy_index])
plt.legend(plts,legends,loc='best')
plt.xlabel('Itr')
plt.ylabel(field)
fig.savefig(plot_path+'/'+plot_name+'_'+"_".join(field.split('/'))+'.pdf')
plt.close(fig) | [
"[email protected]"
] | |
fd81ae94be949535cf1fe0eadba9b97f8ce2d737 | 8f7c595f2b9d075a89417760b7fbf9abb1fecb72 | /common/candle_pattern.py | c952698bb83a7754f6f4afe856dbc8241fd19850 | [
"MIT"
] | permissive | MainakMaitra/trading-utils | 555ed240a20b26d4876f1490fc8a2d9273231fc5 | 3e73091b4d3432e74c385a9677b7f7ca4192c67f | refs/heads/main | 2023-07-04T09:19:40.122188 | 2021-08-08T09:01:37 | 2021-08-08T09:01:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,057 | py | import warnings
warnings.filterwarnings("ignore")
def identify_candle_pattern(ticker_df):
last_candle = ticker_df.iloc[-1]
c = last_candle.close
h = last_candle.high
l = last_candle.low
o = last_candle.open
patterns = []
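    # Doji heuristic: a tiny real body (under 10% of the high-low range,
    # which is assumed to be non-zero) with shadows on both sides each
    # longer than three times the body.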
is_doji = (
abs(c - o) / (h - l) < 0.1
and (h - max(c, o)) > (3 * abs(c - o))
and (min(c, o) - l) > (3 * abs(c - o))
)
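    # Hanging-man heuristic: a small body (10-30% of the range), a lower
    # shadow at least twice the body and an upper shadow larger than a
    # quarter of the body.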
is_hanging_man = (
0.3 > abs(c - o) / (h - l) >= 0.1
and (min(c, o) - l) >= (2 * abs(c - o))
and (h - max(c, o)) > (0.25 * abs(c - o))
)
if is_doji:
patterns.append("doji")
if is_hanging_man:
patterns.append("hanging_man")
return ",".join(patterns) if patterns else "na"
# For Reference
# // Created by Robert N. 030715
# // Updated 031115
# // Candle labels
# study(title = "Mamona Candles", overlay = true)
#
# data1=(close[1]>open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and close<open and abs(close-open)/(high-low)>=0.7 and open>=close[1] and close>open[1] and close<((open[1]+close[1])/2))
# plotshape(data1,title="Dark Cloud Cover",text='DarkCloudCover',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data2=(abs(close-open)/(high-low)<0.1 and (high-max(close,open))>(3*abs(close-open)) and (min(close,open)-low)>(3*abs(close-open)))
# plotshape(data2,title="Doji",text='Doji',color=white, style=shape.circle,location=location.belowbar)
#
# data3=(close[1]>open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and abs(close-open)/(high-low)<0.1 and close[1]<close and close[1]<open and (high-max(close,open))>(3*abs(close-open)) and (min(close,open)-low)>(3*abs(close-open)))
# plotshape(data3,title="Doji Star",text='DojiStar',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data4=(abs(close-open)/(high-low)<0.1 and (min(close,open)-low)>(3*abs(close-open)) and (high-max(close,open))<abs(close-open))
# plotshape(data4,title="Dragonfly Doji",text='DragonflyDoji',color=green, style=shape.arrowup,location=location.belowbar)
#
# data5=(close[2]>open[2] and abs(close[2]-open[2])/(high[2]-low[2])>=0.7 and 0.3>abs(close[1]-open[1])/(high[1]-low[1])>=0.1 and close<open and abs(close-open)/(high-low)>=0.7 and close[2]<close[1] and close[2]<open[1] and close[1]>open and open[1]>open and close<close[2])
# plotshape(data5,title="Evening Star",text='EveningStar',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data6=(close[2]>open[2] and abs(close[2]-open[2])/(high[2]-low[2])>=0.7 and abs(close[1]-open[1])/(high[1]-low[1])<0.1 and close<open and abs(close-open)/(high-low)>=0.7 and close[2]<close[1] and close[2]<open[1] and close[1]>open and open[1]>open and close<close[2] and (high[1]-max(close[1],open[1]))>(3*abs(close[1]-open[1])) and (min(close[1],open[1])-low[1])>(3*abs(close[1]-open[1])))
# plotshape(data6,title="Evening Star Doji",text='EveningStarDoji',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data7=(abs(close-open)/(high-low)<0.1 and (high-max(close,open))>(3*abs(close-open)) and (min(close,open)-low)<=abs(close-open))
# plotshape(data7,title="Gravestone Doji",text='GravestoneDoji',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data8=(close<open and 0.3>abs(close-open)/(high-low)>=0.1 and (min(close,open)-low)>=(2*abs(close-open)) and (high-max(close,open))>(0.25*abs(close-open)))
# plotshape(data8,title="Hanging Man Red",text='HangingMan',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data9=(close>open and 0.3>abs(close-open)/(high-low)>=0.1 and (min(close,open)-low)>=(2*abs(close-open)) and (high-max(close,open))>(0.25*abs(close-open)))
# plotshape(data9,title="Hanging Man Green",text='HangingMan',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data10=(close[2]<open[2] and abs(close[2]-open[2])/(high[2]-low[2])>=0.7 and 0.3>abs(close[1]-open[1])/(high[1]-low[1])>=0.1 and close>open and abs(close-open)/(high-low)>=0.7 and close[2]>close[1] and close[2]>open[1] and close[1]<open and open[1]<open and close>close[2])
# plotshape(data10,title="Morning Star",text='MorningStar',color=green, style=shape.arrowup,location=location.belowbar)
#
# data11=(close[2]<open[2] and abs(close[2]-open[2])/(high[2]-low[2])>=0.7 and abs(close[1]-open[1])/(high[1]-low[1])<0.1 and close>open and abs(close-open)/(high-low)>=0.7 and close[2]>close[1] and close[2]>open[1] and close[1]<open and open[1]<open and close>close[2] and (high[1]-max(close[1],open[1]))>(3*abs(close[1]-open[1])) and (min(close[1],open[1])-low[1])>(3*abs(close[1]-open[1])))
# plotshape(data11,title="Morning Star Doji",text='MorningStarDoji',color=green, style=shape.arrowup,location=location.belowbar)
#
# data12=(close[1]<open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and close>open and abs(close-open)/(high-low)>=0.7 and open<=close[1] and close<open[1] and close<((open[1]+close[1])/2))
# plotshape(data12,title="Piercieng Pattern",text='PiercingPattern',color=green, style=shape.arrowup,location=location.belowbar)
#
# data13=(close[1]<open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and 0.3>abs(close-open)/(high-low)>=0.1 and close[1]>close and close[1]>open)
# plotshape(data13,title="Raindrop",text='Raindrop',color=green, style=shape.arrowup,location=location.belowbar)
#
# data14=(close[1]<open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and abs(close-open)/(high-low)<0.1 and close[1]>close and close[1]>open and (high-max(close,open))>(3*abs(close-open)) and (min(close,open)-low)>(3*abs(close-open)))
# plotshape(data14,title="Raindrop Doji",text='RaindropDoji',color=green, style=shape.arrowup,location=location.belowbar)
#
# data15=(close<open and 0.3>abs(close-open)/(high-low)>=0.1 and (high-max(close,open))>=(2*abs(close-open)) and (min(close,open)-low)<=(0.25*abs(close-open)))
# plotshape(data15,title="Inverted Hammer Red",text='InvertedHammer',color=green, style=shape.arrowup,location=location.belowbar)
#
# data16=(close>open and 0.3>abs(close-open)/(high-low)>=0.1 and (high-max(close,open))>=(2*abs(close-open)) and (min(close,open)-low)<=(0.25*abs(close-open)))
# plotshape(data16,title="Inverted Hammer Green",text='InvertedHammer',color=green, style=shape.arrowup,location=location.belowbar)
#
# data17=(close[1]>open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and 0.3>abs(close-open)/(high-low)>=0.1 and close[1]<close and close[1]<open)
# plotshape(data17,title="Star",text='Star',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data18=(close[1]>open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and close<open and abs(close-open)/(high-low)>=0.7 and open>=close[1] and close<close[1] and close>=((open[1]+close[1])/2))
# plotshape(data18,title="Bearish Thrusting",text='BearishThrusting',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data19=(close[1]<open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and close>open and abs(close-open)/(high-low)>=0.7 and open<=close[1] and close>close[1] and close<=((open[1]+close[1])/2))
# plotshape(data19,title="Bullish Thrusting Pattern",text='BullishThrusting',color=green, style=shape.arrowup,location=location.belowbar)
#
# data20=(close[1]<open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and close<open and 0.3>abs(close-open)/(high-low)>=0.1 and abs(low/low[1]-1)<0.05 and abs(close-open)<2*(min(close,open)-low))
# plotshape(data20,title="Tweezers Bottom",text='TweezersBottom',color=green, style=shape.arrowup,location=location.belowbar)
#
# data21=(close[1]>open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and close>open and 0.3>abs(close-open)/(high-low)>=0.1 and abs(high/high[1]-1)<0.05 and abs(close[1]-open[1])<2*(high[1]-max(close[1],open[1])))
# plotshape(data21,title="Tweezers Top",text='TweezersTop',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data22=(close[3]<open[3] and abs(close[3]-open[3])/(high[3]-low[3])>=0.7 and close[2]>open[2] and 0.3>abs(close[2]-open[2])/(high[2]-low[2])>=0.1 and close[1]>open[1] and 0.3>abs(close[1]-open[1])/(high[1]-low[1])>=0.1 and close>open and abs(close-open)/(high-low)>=0.7 and close[2]>close[1] and close[1]>close[3] and open[2]<close[3] and open[1]<close[3] and close>((open[3]+close[3])/2))
# plotshape(data22,title="Tower Bottom",text='TowerBottom',color=green, style=shape.arrowup,location=location.belowbar)
#
# data23=(close[3]>open[3] and abs(close[3]-open[3])/(high[3]-low[3])>=0.7 and close[2]<open[2] and 0.3>abs(close[2]-open[2])/(high[2]-low[2])>=0.1 and close[1]<open[1] and 0.3>abs(close[1]-open[1])/(high[1]-low[1])>=0.1 and close<open and abs(close-open)/(high-low)>=0.7 and close[2]<close[1] and close[1]<close[3] and open[2]>close[3] and open[1]>close[3] and close<((open[3]+close[3])/2))
# plotshape(data23,title="Tower Top",text='TowerTop',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data24=(close[1]<open[1] and 0.7>abs(close[1]-open[1])/(high[1]-low[1])>=0.3 and close>open and 0.7>abs(close-open)/(high-low)>=0.3 and close<=close[1] and close>low[1])
# plotshape(data24,title="Bullish In Neck",text='BullishInNeck',color=green, style=shape.arrowup,location=location.belowbar)
#
# data25=(close[1]>open[1] and 0.7>abs(close[1]-open[1])/(high[1]-low[1])>=0.3 and close<open and 0.7>abs(close-open)/(high-low)>=0.3 and close>=close[1] and close<high[1])
# plotshape(data25,title="Bearish In Neck",text='BearishInNeck',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data26=(close[1]>open[1] and 0.7>abs(close[1]-open[1])/(high[1]-low[1])>=0.3 and close<open and 0.7>abs(close-open)/(high-low)>=0.3 and open<=open[1] and open>low[1])
# plotshape(data26,title="Bullish Separating Lines",text='BullishSeparatingLines',color=green, style=shape.arrowup,location=location.belowbar)
#
# data27=(close[1]<open[1] and 0.7>abs(close[1]-open[1])/(high[1]-low[1])>=0.3 and close>open and 0.7>abs(close-open)/(high-low)>=0.3 and open>=open[1] and open<high[1])
# plotshape(data27,title="Bearish Separating Lines",text='BearishSeparatingLines',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data28=(close[1]<open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and 0.3>abs(close-open)/(high-low)>=0.1 and high<open[1] and low>close[1])
# plotshape(data28,title="Bullish Harami",text='BullishHarami',color=green, style=shape.arrowup,location=location.belowbar)
#
# data29=(close[1]>open[1] and abs(close[1]-open[1])/(high[1]-low[1])>=0.7 and 0.3>abs(close-open)/(high-low)>=0.1 and high<close[1] and low>open[1])
# plotshape(data29,title="Bearish Harami",text='BearishHarami',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data30=(close[1]<open[1] and 0.3>abs(close[1]-open[1])/(high[1]-low[1])>=0.1 and close>open and abs(close-open)/(high-low)>=0.7 and high[1]<close and low[1]>open)
# plotshape(data30,title="Bullish Engulfing",text='BullishEngulfing',color=green, style=shape.arrowup,location=location.belowbar)
#
# data31=(close[1]>open[1] and 0.3>abs(close[1]-open[1])/(high[1]-low[1])>=0.1 and close<open and abs(close-open)/(high-low)>=0.7 and high[1]<open and low[1]>close)
# plotshape(data31,title="Bearish Engulfing",text='BearishEngulfing',color=red, style=shape.arrowdown,location=location.abovebar)
#
# data32=(abs(close[1]-open[1])/(high[1]-low[1])<0.1 and close>open and abs(close-open)/(high-low)>=0.7 and high[1]<close and low[1]>open and (high[1]-max(close[1],open[1]))>(3*abs(close[1]-open[1])) and (min(close[1],open[1])-low[1])<=abs(close[1]-open[1]))
# plotshape(data32,title="Doji Bullish Engulfing",text='DojiBullishEngulfing',color=green, style=shape.arrowup,location=location.belowbar)
#
# data33=(abs(close[1]-open[1])/(high[1]-low[1])<0.1 and close<open and abs(close-open)/(high-low)>=0.7 and high[1]<open and low[1]>close and (high[1]-max(close[1],open[1]))>(3*abs(close[1]-open[1])) and (min(close[1],open[1])-low[1])<=abs(close[1]-open[1]))
# plotshape(data31,title="Doji Bearish Engulfing",text='DojiBearishEngulfing',color=red, style=shape.arrowdown,location=location.abovebar)
| [
"[email protected]"
] | |
506ad697da20580fe7c04f475cd6a9627d12a143 | 3fb0ce33f00b96ae3808a32da44de3e887434afb | /.提出一覧/AtCoder/ABC152/b/main.py | db01e75914ac864f9d5405badbb44bdf931bfdbe | [] | no_license | Yukikazari/kyoupuro | ca3d74d8db024b1988cd0ff00bf069ab739783d7 | 343de455c4344dbcfa4524b492f7f6205c9db26f | refs/heads/master | 2023-02-21T01:53:52.403729 | 2021-01-27T03:55:01 | 2021-01-27T03:55:01 | 282,222,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | #!/usr/bin/env python3
#import
#import math
#import numpy as np
#= int(input())
#= input()
a, b = map(int, input().split())
print(str(a) * b if a < b else str(b) * a)
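# ABC152 B: both candidate strings consist of a single repeated digit, so the
# one built from the smaller digit, repeated the larger number of times, is
# lexicographically smallest.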
| [
"[email protected]"
] | |
ddcdca81b7542c683b67f1aafaf5ef342f08229f | 8b9897577d3278e6070bb99f5fcfcc4f49df538e | /l10n_be_coda_advanced/wizard/coda_helpers.py | d2ae4c5393cc9fd1251b93a8e8ca0967ddef06ea | [] | no_license | ilexius/noviat-apps | 422eb3df8ab4f6b944dcc841d3cf442b8da2aad1 | 84510bab01251d96bcb4a2ed9c14db8737495592 | refs/heads/8.0 | 2021-01-15T12:37:56.579920 | 2015-03-16T15:39:56 | 2015-03-16T15:39:56 | 35,040,239 | 0 | 0 | null | 2015-05-04T14:42:51 | 2015-05-04T14:42:51 | null | UTF-8 | Python | false | false | 3,482 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
#
# Copyright (c) 2010-now Noviat nv/sa (www.noviat.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_iban.base_iban import _ref_iban, _format_iban
import time
def calc_iban_checksum(country, bban):
bban = bban.replace(' ', '').upper() + country.upper() + '00'
base = ''
for c in bban:
if c.isdigit():
base += c
else:
base += str(ord(c) - ord('A') + 10)
kk = 98 - int(base) % 97
return str(kk).rjust(2, '0')
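# Worked example: for the commonly used sample Belgian account
# '539007547034', the rearranged digit string is '539007547034' + '1114'
# (B=11, E=14) + '00', and 98 - (int(...) % 97) gives '68', i.e. the IBAN
# BE68 5390 0754 7034.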
def check_bban(country, bban):
if country == 'BE':
try:
int(bban)
        except (TypeError, ValueError):
return False
if len(bban) != 12:
return False
return True
def check_iban(iban):
"""
Check the IBAN number
Logic partially based upon base_iban module, cf. is_iban_valid method
"""
iban = _format_iban(iban).lower()
if iban[:2] not in _ref_iban:
return False
if len(iban) != len(_format_iban(_ref_iban[iban[:2]])):
return False
# the four first digits have to be shifted to the end
iban = iban[4:] + iban[:4]
# letters have to be transformed into numbers (a = 10, b = 11, ...)
iban2 = ""
for char in iban:
if char.isalpha():
iban2 += str(ord(char)-87)
else:
iban2 += char
# iban is correct if modulo 97 == 1
return int(iban2) % 97 == 1
def get_iban_and_bban(number):
"""
return IBAN and BBAN numbers
Logic partially based upon base_iban module, cf. get_bban_from_iban method
"""
mapping_list = {
        # TODO add rules for other countries
'be': lambda x: x[4:],
'fr': lambda x: x[14:],
'ch': lambda x: x[9:],
'gb': lambda x: x[14:],
}
number = number.replace(' ', '')
for code, function in mapping_list.items():
if number.lower().startswith(code):
return [function(number), number]
return [number]
def repl_special(s):
s = s.replace("\'", "\'" + "'")
return s
def str2date(date_str):
    try:
        return time.strftime('%Y-%m-%d', time.strptime(date_str, '%d%m%y'))
    except (TypeError, ValueError):
        return False
def str2time(time_str):
    return time_str[:2] + ':' + time_str[2:]
def str2float(s):
    # parameter renamed from 'str' to avoid shadowing the builtin
    try:
        return float(s)
    except (TypeError, ValueError):
        return 0.0
def list2float(lst):
    # interpret the last three characters as decimals, e.g. '12345' -> 12.345
    try:
        return str2float(lst[:-3] + '.' + lst[-3:])
    except (TypeError, ValueError):
        return 0.0
def number2float(s, d):
    # insert a decimal point d digits from the right
    try:
        return float(s[:len(s) - d] + '.' + s[len(s) - d:])
    except (TypeError, ValueError):
        return False
| [
"[email protected]"
] | |
1b2fb14a7985f07866ac3a63d467662a7aec1bae | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/compose/2016/8/setup.py | 5cb52dae4a36d541c4ac4370ce84ff326b9ec86f | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 2,162 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
import os
import re
import sys
from setuptools import find_packages
from setuptools import setup
def read(*parts):
path = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(path, encoding='utf-8') as fobj:
return fobj.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
install_requires = [
'cached-property >= 1.2.0, < 2',
'docopt >= 0.6.1, < 0.7',
'PyYAML >= 3.10, < 4',
'requests >= 2.6.1, < 2.8',
'texttable >= 0.8.1, < 0.9',
'websocket-client >= 0.32.0, < 1.0',
'docker-py >= 1.9.0, < 2.0',
'dockerpty >= 0.4.1, < 0.5',
'six >= 1.3.0, < 2',
'jsonschema >= 2.5.1, < 3',
]
tests_require = [
'pytest',
]
if sys.version_info[:2] < (3, 4):
tests_require.append('mock >= 1.0.1')
install_requires.append('enum34 >= 1.0.4, < 2')
setup(
name='docker-compose',
version=find_version("compose", "__init__.py"),
description='Multi-container orchestration for Docker',
url='https://www.docker.com/',
author='Docker, Inc.',
license='Apache License 2.0',
packages=find_packages(exclude=['tests.*', 'tests']),
include_package_data=True,
test_suite='nose.collector',
install_requires=install_requires,
tests_require=tests_require,
entry_points="""
[console_scripts]
docker-compose=compose.cli.main:main
""",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
)
| [
"[email protected]"
] | |
a57c46b717155156ea82dd13afd065add60ee1b1 | 2bafb77b683c5c41938cb9d5a01b6e5d29cfb9b7 | /djviews/bin/wheel | 35896295de858b303021dc3c09d1888a7bc2b3b4 | [] | no_license | FaisalWant/Django-Website | 3c1c31d2f2e942a5f9d43def9317fea2d5aeb51b | ab7864c308e677c38107c65e2e5b0bdde24aa132 | refs/heads/master | 2023-03-14T12:13:59.296757 | 2021-03-02T18:56:22 | 2021-03-02T18:56:22 | 339,169,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | #!/home/xander/Documents/DjangoProjects/Django3/DjangoNewDoc/djviews/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
8846947bbe4c37c7b7a6464de1a539be071a0adf | 94579f95eef969aec090f98a3c6841ea341af36f | /covids/views.py | 143c9976747cf3d5c02064a6af21d8ae6e4a5d31 | [] | no_license | smilejakdu/django_covid | d8b10e401dcf6fa7fe65c72e3c2b03f4fe2171d5 | 1092d6798ca30270d3f6c65345063e0376dfbbb8 | refs/heads/master | 2022-04-18T12:13:42.451188 | 2020-04-15T19:00:40 | 2020-04-15T19:00:40 | 255,989,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,426 | py | import json , requests
from .models import Covid, KoreaCovid
from django.views import View
from django.http import JsonResponse
from django.db.models import Q, Sum
class CovidApiView(View):
    def get(self, request):
        try:
            query = request.GET.get('keyword', None)  # search keyword for filtering the COVID data
if query :
world_data = Covid.objects.filter(Q(area__icontains=query) | Q(country__icontains=query)).all()
korea_data = KoreaCovid.objects.filter(Q(area__icontains=query)).all()
world_patient_count = world_data.count()
korea_patient_count = korea_data.count()
data = {
'country_covid_count' : world_patient_count,
'korea_covid_count' : korea_patient_count,
'world_covid_data' : [{
'id' : world.id,
'area' : world.area,
'country' : world.country,
'patient' : world.patient,
'dead' : world.dead,
} for world in world_data],
'korea_covid_data' : [{
'id' : korea.id,
'area' : korea.area,
'patient' : korea.patient,
} for korea in korea_data]
}
return JsonResponse({"data" : data},status=200)
country_covid = Covid.objects.values()
korea_covid = KoreaCovid.objects.values()
korea_covid_count = KoreaCovid.objects.all().aggregate(Sum('patient'))
country_covid_count = Covid.objects.all().aggregate(Sum('patient'))
return JsonResponse({'data' : {
'country_covid_count' : country_covid_count,
'korea_covid_count' : korea_covid_count,
'world_covid_data' : list(country_covid),
'korea_covid_data' : list(korea_covid),
}}, status=200)
except Covid.DoesNotExist:
return JsonResponse({'message': 'Not found'}, status=400)
except TypeError:
return JsonResponse({'message': 'error'}, status=400)
| [
"[email protected]"
] | |
0d5e647977a66fe9030108befd680373845aee95 | 25dda94672497e3287a7403e283fb279ad171b79 | /boj/11054 가장 긴 바이토닉 부분수열.py | 458adceb31b78b34b5a3924ec2c09a7a983434dd | [] | no_license | woorud/Algorithm | c94b844e8c96a446c5fdee5c0abb159bfee384d7 | f5b8e3cf0aea7fc4400e6f5bb0c1531fad93e541 | refs/heads/master | 2023-02-23T13:53:28.645036 | 2021-01-29T12:24:23 | 2021-01-29T12:24:23 | 230,908,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | n = int(input())
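# Longest bitonic subsequence (BOJ 11054): dpi[i] is the length of the longest
# strictly increasing subsequence ending at i (left-to-right pass) and dpd[i]
# the longest strictly decreasing subsequence starting at i (right-to-left
# pass); the answer is the maximum of dpi[i] + dpd[i] - 1, counting the peak
# element once.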
num_list = list(map(int, input().split()))
dpi = [0 for i in range(n)]
dpd = [0 for i in range(n)]
for i in range(n):
dpi[i] = 1
for j in range(i):
if num_list[j] < num_list[i]:
dpi[i] = max(dpi[i], dpi[j]+1)
for i in range(n-1, -1, -1):
dpd[i] = 1
for j in range(n-1, i-1, -1):
if num_list[j] < num_list[i]:
dpd[i] = max(dpd[i], dpd[j]+1)
cnt = 0
for i in range(n):
if cnt < dpi[i] + dpd[i]-1:
cnt = dpi[i] + dpd[i]-1
# print only the final answer for the judge
print(cnt) | [
"[email protected]"
] | |
e77a6ff25e6a3e458ac0d9464dce70336b45709a | 88a2f57b7d660228ca1ac922f0f582910bcacb3d | /algorithm/problem/AD sol/AD_3day_솔루션_김은경/주사위던지기1.py | d6af23c180c38cbdd427b3caf0f2b150d4c6d32b | [] | no_license | chelseashin/TIL | adc5ed0bd4ba084e85b74baa9699096a7af5585e | 376b56844985b3ff43b94fa18086a449e6deac69 | refs/heads/master | 2022-12-10T02:13:39.680936 | 2020-11-19T13:18:30 | 2020-11-19T13:18:30 | 162,103,813 | 2 | 0 | null | 2022-12-08T04:53:38 | 2018-12-17T09:11:23 | Jupyter Notebook | UTF-8 | Python | false | false | 1,380 | py | import sys
sys.stdin = open("in.txt")
def DFS1(no):  # M=1: sequences of faces with repetition (product)
    if no > N:
        for i in range(1, N+1): print(rec[i], end=' ')
        print()
        return
    for i in range(1, 7):  # die face
        rec[no] = i  # record the face
        DFS1(no+1)
def DFS3(no):  # M=3: permutations without repeated faces
    if no > N:
        for i in range(1, N+1): print(rec[i], end=' ')
        print()
        return
    for i in range(1, 7):  # die face
        if chk[i]: continue
        chk[i] = 1
        rec[no] = i  # record the face
        DFS3(no+1)
        chk[i] = 0
def DFS2(no, start):  # M=2: combinations with repetition
    if no > N:
        for i in range(1, N + 1): print(rec[i], end=' ')
        print()
        return
    for i in range(start, 7):  # die face
        rec[no] = i  # record the face
        DFS2(no + 1, i)
def DFS4(no, start):  # M=4: combinations without repetition
    if no > N:
        for i in range(1, N + 1): print(rec[i], end=' ')
        print()
        return
    for i in range(start, 7):  # die face
        rec[no] = i  # record the face
        DFS4(no + 1, i+1)
#main -----------------------------
N, M = map(int, input().split())
rec = [0]*(N+1)
chk = [0]*7  # flags for faces already used (M=3)
if M == 1: DFS1(1)  # sequences with repetition, starting from die 1
elif M == 3: DFS3(1)  # permutations without repetition
elif M == 2: DFS2(1, 1)  # combinations with repetition: start from die 1, face 1
elif M == 4: DFS4(1, 1)  # combinations without repetition
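# For reference, the four modes correspond to itertools over faces 1..6
# (a sketch, not part of the judged submission):
#   M=1 -> itertools.product(range(1, 7), repeat=N)
#   M=3 -> itertools.permutations(range(1, 7), N)
#   M=2 -> itertools.combinations_with_replacement(range(1, 7), N)
#   M=4 -> itertools.combinations(range(1, 7), N)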
| [
"[email protected]"
] | |
3b6456b8d23503a98bd1c276e4809d8ea93d3c1b | 33e5d2a1b26f467e0ec116cb99c354c44266581b | /meteva/method/Vector/plot.py | 81bd2bca95fc6e6b55d3cc8297e6e6e44b9d1d24 | [] | no_license | Snake-99/meteva | 8ebf74b64f2bb26d9eb3affdb044ba01bcf3e50e | afcb34895758b8eeb5e503feaddad5248e5f2f06 | refs/heads/master | 2023-02-18T11:14:10.652467 | 2021-01-18T09:02:46 | 2021-01-18T09:02:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,327 | py | import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import math
import numpy as np
import meteva
from matplotlib.colors import BoundaryNorm
def scatter_uv_error(u_ob,u_fo,v_ob,v_fo,member_list = None,title = "风矢量误差散点分布图"
, vmax=None, ncol=None, save_path=None, show=False, dpi=300,
sup_fontsize=10, width=None, height=None):
if vmax is None:
du = u_fo - u_ob
dv = v_fo - v_ob
speed_d= np.sqrt(du * du + dv * dv)
vmax = np.max(speed_d)
vmax = math.ceil(vmax)
Fo_shape = u_fo.shape
Ob_shape = u_ob.shape
Ob_shpe_list = list(Ob_shape)
size = len(Ob_shpe_list)
ind = -size
Fo_Ob_index = list(Fo_shape[ind:])
if Fo_Ob_index != Ob_shpe_list:
print('实况数据和观测数据维度不匹配')
return
Ob_shpe_list.insert(0, -1)
new_Fo_shape = tuple(Ob_shpe_list)
new_u_Fo = u_fo.reshape(new_Fo_shape)
new_v_Fo = v_fo.reshape(new_Fo_shape)
new_Fo_shape = new_u_Fo.shape
sub_plot_num = new_Fo_shape[0]
if ncol is None:
if sub_plot_num ==1:
ncols = 1
elif sub_plot_num %2 == 0:
ncols = 2
else:
ncols = 3
else:
ncols = ncol
nrows = math.ceil(new_Fo_shape[0] / ncols)
if height is None:
if nrows==1:
if ncols <3:
height_fig = 3.5
else:
height_fig = 2.5
else:
if ncols > nrows:
height_fig = 6
else:
height_fig = 7
else:
height_fig = height
height_suptitle = 0.1
height_xticks_title = 0.1
height_hspace = 0.6
heidht_axis = (height_fig - height_suptitle - height_xticks_title - height_hspace * (nrows - 1)) / nrows
width_axis = heidht_axis
width_yticks = 0.15
width_wspace = width_yticks * 5
if width is None:
width_fig = width_axis * ncols + width_wspace * (ncols - 1) + width_yticks
else:
width_fig = width
fig = plt.figure(figsize=(width_fig,height_fig),dpi = dpi)
u1 = u_ob.flatten()
v1 = v_ob.flatten()
if member_list is None:
member_list = []
for line in range(new_Fo_shape[0]):
member_list.append("预报" + str(line))
colors = meteva.base.color_tools.get_color_list(new_Fo_shape[0]+1)
for line in range(new_Fo_shape[0]):
u2 = new_u_Fo[line, :].flatten()
v2 = new_v_Fo[line, :].flatten()
markersize = 5 * width_axis * heidht_axis / np.sqrt(u_ob.size)
if markersize < 1:
markersize = 1
elif markersize > 20:
markersize = 20
plt.subplot(nrows, ncols, line + 1)
plt.plot(u2-u1,v2-v1,'.',color = colors[line+1], markersize=markersize)
#plt.plot(u1,v1,'.',color= 'b', markersize=markersize)
plt.xlabel("U分量",fontsize = sup_fontsize *0.9)
plt.ylabel("V分量",fontsize = sup_fontsize *0.9)
plt.title(member_list[line],fontsize = sup_fontsize)
#print(maxs)
plt.xlim(-vmax,vmax)
plt.ylim(-vmax,vmax)
#plt.legend()
angles = np.arange(0,360,45)
for i in range(len(angles)):
angle = angles[i] * 3.1415926 /180
r = np.arange(0,vmax+1,vmax * 0.1)
x = r * np.sin(angle)
y = r * np.cos(angle)
plt.plot(x,y,"--",color = "k",linewidth = 0.5)
rs = np.arange(0,vmax+1,1)
for i in range(len(rs)):
r = rs[i]
angle = np.arange(0,360) * 3.1415926 /180
x = r * np.sin(angle)
y = r * np.cos(angle)
plt.plot(x,y,"--",color = "k",linewidth = 0.5)
titlelines = title.split("\n")
fig.suptitle(title, fontsize=sup_fontsize, y=0.99+0.01 * len(titlelines))
if(save_path is not None):
file1,extension = os.path.splitext(save_path)
extension = extension[1:]
plt.savefig(save_path,format = extension)
else:
show = True
if show:
plt.show()
plt.close()
def scatter_uv(u_ob,u_fo,v_ob,v_fo,member_list = None,title = "风矢量散点分布图"
, vmax=None, ncol=None, save_path=None, show=False, dpi=300,
sup_fontsize=10, width=None, height=None,add_randn_to_ob = 0.0):
if vmax is None:
speed_ob = np.sqrt(u_ob * u_ob + v_ob * v_ob)
speed_fo = np.sqrt(u_fo * u_fo + v_fo * v_fo)
vmax = max(np.max(speed_ob), np.max(speed_fo))
vmax = math.ceil(vmax)
Fo_shape = u_fo.shape
Ob_shape = u_ob.shape
Ob_shpe_list = list(Ob_shape)
size = len(Ob_shpe_list)
ind = -size
Fo_Ob_index = list(Fo_shape[ind:])
if Fo_Ob_index != Ob_shpe_list:
print('实况数据和观测数据维度不匹配')
return
Ob_shpe_list.insert(0, -1)
new_Fo_shape = tuple(Ob_shpe_list)
new_u_Fo = u_fo.reshape(new_Fo_shape)
new_v_Fo = v_fo.reshape(new_Fo_shape)
new_Fo_shape = new_u_Fo.shape
sub_plot_num = new_Fo_shape[0]
if ncol is None:
if sub_plot_num ==1:
ncols = 1
elif sub_plot_num %2 == 0:
ncols = 2
else:
ncols = 3
else:
ncols = ncol
nrows = math.ceil(new_Fo_shape[0] / ncols)
if height is None:
if nrows==1:
if ncols <3:
height_fig = 3.5
else:
height_fig = 2.5
else:
if ncols > nrows:
height_fig = 6
else:
height_fig = 7
else:
height_fig = height
height_suptitle = 0.1
height_xticks_title = 0.1
height_hspace = 0.6
heidht_axis = (height_fig - height_suptitle - height_xticks_title - height_hspace * (nrows - 1)) / nrows
width_axis = heidht_axis
width_yticks = 0.15
width_wspace = width_yticks * 5
if width is None:
width_fig = width_axis * ncols + width_wspace * (ncols - 1) + width_yticks
else:
width_fig = width
fig = plt.figure(figsize=(width_fig,height_fig),dpi = dpi)
    u1 = u_ob.flatten() + np.random.randn(u_ob.size) * add_randn_to_ob  # jitter sized to the flattened array
    v1 = v_ob.flatten() + np.random.randn(v_ob.size) * add_randn_to_ob
if member_list is None:
member_list = []
for line in range(new_Fo_shape[0]):
member_list.append("预报" + str(line))
colors = meteva.base.color_tools.get_color_list(new_Fo_shape[0]+1)
for line in range(new_Fo_shape[0]):
u2 = new_u_Fo[line, :].flatten()
v2 = new_v_Fo[line, :].flatten()
markersize = 15 * width_axis * heidht_axis / np.sqrt(u_ob.size)
if markersize < 1:
markersize = 1
elif markersize > 20:
markersize = 20
plt.subplot(nrows, ncols, line + 1)
plt.plot(u1,v1,'.',color= "r", markeredgewidth = 0, markersize=markersize,alpha = 0.5,label = "ob")
plt.plot(u2,v2,'.',color= "b", markeredgewidth = 0, markersize=markersize,alpha = 0.5,label = "fo")
plt.xlabel("U分量",fontsize = sup_fontsize *0.9)
plt.ylabel("V分量",fontsize = sup_fontsize *0.9)
plt.title(member_list[line],fontsize = sup_fontsize)
#print(maxs)
plt.xlim(-vmax,vmax)
plt.ylim(-vmax,vmax)
plt.legend()
angles = np.arange(0,360,45)
for i in range(len(angles)):
angle = angles[i] * 3.1415926 /180
r = np.arange(0,vmax+1,vmax * 0.1)
x = r * np.sin(angle)
y = r * np.cos(angle)
plt.plot(x,y,"--",color = "k",linewidth = 0.5)
rs = np.arange(0,vmax+1,1)
for i in range(len(rs)):
r = rs[i]
angle = np.arange(0,360) * 3.1415926 /180
x = r * np.sin(angle)
y = r * np.cos(angle)
plt.plot(x,y,"--",color = "k",linewidth = 0.5)
titlelines = title.split("\n")
fig.suptitle(title, fontsize=sup_fontsize, y=0.99+0.01 * len(titlelines))
if(save_path is not None):
file1,extension = os.path.splitext(save_path)
extension = extension[1:]
plt.savefig(save_path,format = extension)
else:
show = True
if show:
plt.show()
plt.close()
def uv_frequent_statistic(u,v,ngrade = 16,half_span = 22.5,rate = 20,smtime = 50):
'''
    :param u: input u wind components (list or array)
    :param v: input v wind components (list or array)
    :param ngrade: number of evenly spaced directions over 360 degrees at which the statistics are computed
    :param half_span: half-width, in degrees, of the sector around each central direction used for the statistics
    :param rate: densification factor that interpolates the per-direction statistics into a smooth ring
    :param smtime: number of smoothing passes applied around the ring
    :return: interpolated mean u, mean v, relative frequency and standard deviation for each direction
'''
s1,a1 = meteva.base.tool.math_tools.u_v_to_s_d(u,v)
step = 360 / ngrade
ms1 = np.zeros(ngrade)
ma1 = np.zeros(ngrade)
mf1 = np.zeros(ngrade)
mstd1 = np.zeros(ngrade)
for i in range(ngrade):
mid_angle = i * step
d_angle = 180 - np.abs(np.abs(a1 - mid_angle) - 180)
s2 = s1[d_angle<=half_span]
if s2.size == 0:
ms1[i] = 0
mf1[i] = 0
ma1[i] = 0
mstd1[i] = 0.5
else:
ms1[i] = np.mean(s2)
mf1[i] = len(s2)
ma1[i] = mid_angle
mstd1[i] = np.std(s2)
mu1,mv1 = meteva.base.math_tools.s_d_to_u_v(ms1,ma1)
ngrade2 = ngrade * rate
x = np.arange(ngrade2)/ rate
ig = x.astype(dtype='int16')
dx = x - ig
ig1 = ig + 1
ii = ig % ngrade
ii1 = ig1 % ngrade
mu2 = mu1[ii] * (1-dx) + mu1[ii1] * dx
mv2 = mv1[ii] * (1-dx) + mv1[ii1] * dx
mf2 = mf1[ii] * (1-dx) + mf1[ii1] * dx
mstd2 = mstd1[ii] * (1-dx) + mstd1[ii1] * dx
ig = np.arange(ngrade2)
ig1 = (ig + 1) % ngrade2
ig_1 = (ig + ngrade2 - 1) % ngrade2
for k in range(smtime):
mu2 = (mu2 * 2 + mu2[ig1] + mu2[ig_1])/4
mv2 = (mv2 * 2 + mv2[ig1] + mv2[ig_1]) / 4
mf2 = (mf2 * 2 + mf2[ig1] + mf2[ig_1]) / 4
mstd2 = (mstd2 * 2 + mstd2[ig1] + mstd2[ig_1]) / 4
mf2 = 10 * (360/half_span) * (mf2/u.size)
return mu2,mv2,mf2,mstd2
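# Minimal usage sketch (synthetic values, numpy only):
#   u = np.random.randn(1000) * 3.0
#   v = np.random.randn(1000) * 3.0 + 2.0
#   mu, mv, mf, mstd = uv_frequent_statistic(u, v)
# With the defaults each returned array has ngrade * rate = 320 entries,
# tracing one smoothed ring of per-direction statistics.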
def statisitic_uv(u_ob,u_fo,v_ob,v_fo,member_list = None,title = "风矢量分布统计图"
,vmax=None, ncol=None, save_path=None, show=False, dpi=300,
sup_fontsize=10, width=None, height=None):
Fo_shape = u_fo.shape
Ob_shape = u_ob.shape
Ob_shpe_list = list(Ob_shape)
size = len(Ob_shpe_list)
ind = -size
Fo_Ob_index = list(Fo_shape[ind:])
if Fo_Ob_index != Ob_shpe_list:
print('实况数据和观测数据维度不匹配')
return
Ob_shpe_list.insert(0, -1)
new_Fo_shape = tuple(Ob_shpe_list)
new_u_Fo = u_fo.reshape(new_Fo_shape)
new_v_Fo = v_fo.reshape(new_Fo_shape)
new_Fo_shape = new_u_Fo.shape
sub_plot_num = new_Fo_shape[0]
if ncol is None:
if sub_plot_num ==1:
ncols = 1
elif sub_plot_num %2 == 0:
ncols = 2
else:
ncols = 3
else:
ncols = ncol
nrows = math.ceil(new_Fo_shape[0] / ncols)
if height is None:
if nrows==1:
if ncols <3:
height_fig = 3.5
else:
height_fig = 2.5
else:
if ncols > nrows:
height_fig = 6
else:
height_fig = 7
else:
height_fig = height
height_suptitle = 0.1
height_xticks_title = 0.1
height_hspace = 0.6
heidht_axis = (height_fig - height_suptitle - height_xticks_title - height_hspace * (nrows - 1)) / nrows
width_axis = heidht_axis
width_yticks = 0.15
width_wspace = width_yticks * 5
if width is None:
width_fig = width_axis * ncols + width_wspace * (ncols - 1) + width_yticks
else:
width_fig = width
fig = plt.figure(figsize=(width_fig,height_fig),dpi = dpi)
u1 = u_ob.flatten()
v1 = v_ob.flatten()
mu1,mv1,mf1,mstd1 = uv_frequent_statistic(u1,v1)
ms1,ma1 = meteva.base.math_tools.u_v_to_s_d(mu1,mv1)
gray1 = ms1/(ms1+mstd1)
cmap1, clevs1 = meteva.base.tool.color_tools.def_cmap_clevs(cmap="autumn", vmin=0.5, vmax=1)
norm1= BoundaryNorm(clevs1, ncolors=cmap1.N-1)
cmap2, clevs2= meteva.base.tool.color_tools.def_cmap_clevs(cmap="winter", vmin=0.5, vmax=1)
norm2= BoundaryNorm(clevs2, ncolors=cmap1.N-1)
if member_list is None:
member_list = []
for line in range(new_Fo_shape[0]):
member_list.append("预报" + str(line))
ms_list = [ms1]
mu2_list = []
mv2_list = []
mf2_list = []
mgray2_list = []
for line in range(new_Fo_shape[0]):
u2 = new_u_Fo[line, :].flatten()
v2 = new_v_Fo[line, :].flatten()
mu2, mv2, mf2, mstd2 = uv_frequent_statistic(u2, v2)
ms2, ma2 = meteva.base.math_tools.u_v_to_s_d(mu2, mv2)
ms_list.append(ms2)
mu2_list.append(mu2)
mv2_list.append(mv2)
mf2_list.append(mf2)
gray2 = ms2 / (ms2 + mstd2)
mgray2_list.append(gray2)
if vmax is None:
vmax = np.max(np.array(ms_list)) * 1.2
ax_ob = None
ax_fo = None
for line in range(new_Fo_shape[0]):
plt.subplot(nrows, ncols, line + 1)
ax_ob = plt.scatter(mu1, mv1, c=gray1,s = mf1,cmap = cmap1,norm=norm1)
ax_fo = plt.scatter(mu2_list[line], mv2_list[line], c=mgray2_list[line],s = mf2_list[line],cmap = cmap2,norm = norm2)
plt.xlabel("U分量",fontsize = sup_fontsize *0.9)
plt.ylabel("V分量",fontsize = sup_fontsize *0.9)
plt.title(member_list[line],fontsize = sup_fontsize)
#print(maxs)
plt.xlim(-vmax,vmax)
plt.ylim(-vmax,vmax)
#plt.legend()
angles = np.arange(0,360,45)
for i in range(len(angles)):
angle = angles[i] * 3.1415926 /180
r = np.arange(0,vmax+1,vmax * 0.1)
x = r * np.sin(angle)
y = r * np.cos(angle)
plt.plot(x,y,"--",color = "k",linewidth = 0.5)
rs = np.arange(0,vmax+1,1)
for i in range(len(rs)):
r = rs[i]
angle = np.arange(0,360) * 3.1415926 /180
x = r * np.sin(angle)
y = r * np.cos(angle)
plt.plot(x,y,"--",color = "k",linewidth = 0.5)
    colorbar_position_grid = fig.add_axes([0.12, -0.05, 0.35, 0.03])  # position [left, bottom, width, height]
colorbar_ob = plt.colorbar(ax_ob, cax=colorbar_position_grid, orientation='horizontal')
colorbar_ob.set_label('指定角度上观测风速的一致性')
    colorbar_position_grid = fig.add_axes([0.55, -0.05, 0.35, 0.03])  # position [left, bottom, width, height]
colorbar_fo = plt.colorbar(ax_fo, cax=colorbar_position_grid, orientation='horizontal')
colorbar_fo.set_label('指定角度上预报风速的一致性')
titlelines = title.split("\n")
fig.suptitle(title, fontsize=sup_fontsize, y=0.99+0.01 * len(titlelines))
if(save_path is not None):
file1,extension = os.path.splitext(save_path)
extension = extension[1:]
plt.savefig(save_path,format = extension)
else:
show = True
if show:
plt.show()
plt.close()
def frequent_distribution_uv():
pass
def regress_uv():
pass | [
"[email protected]"
] | |
10eba65d3445dee467671ffc6c4761edd61d7cb3 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/rin.py | 517ac2ca3690ac6482c12bf0e356b569497144ff | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'rIN':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
3bc1353a645d0c71c621ab33cb88163e3daf3652 | d3eea2056dd9798938162b07bee105751943f2ed | /install.py | ce2865f02442894e29996b04e54956d9ce809278 | [] | no_license | ExpLangcn/VulScanner | 193ce6591a7b77774d6eab1dc5d2fc95e21f92fb | b249e00cacaff42d6eb99e3f4e60532dcf3416ff | refs/heads/main | 2023-06-20T11:29:02.677327 | 2021-07-20T01:04:07 | 2021-07-20T01:04:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | import configparser
import os
import pymysql
from django.conf import settings
settings.configure()
conf = configparser.ConfigParser()
conf.read(os.getcwd() + "/" + "config.ini")
mysql_config = { # for mysql and postgresql
'host': conf.get('global', 'ip'),
'port': int(conf.get('global', 'port')),
'user': conf.get('global', 'uname'),
'password': conf.get('global', 'passwd'),
'database': conf.get('global', 'table'),
"connect_timeout": 1
}
def exec_sql(conn, sql):
pass
if __name__ == '__main__':
    with open("poc.sql", "rb") as sql_file:
        try:
            conn = pymysql.connect(**mysql_config)
            cursor = conn.cursor()
            for i in sql_file:
                # one SQL statement per line in poc.sql
                result = cursor.execute(i.strip().decode())
                if result != 1:
                    print("[-]execute sql fail")
                    break
            conn.commit()
            conn.close()
            print("[+]install pocs success")
        except Exception as e:
            print("[-]can't connect to mysql or execute sql: %s" % e)
| [
"[email protected]"
] | |
68f6b2841c0268d392a9d100f17394c1db3517a7 | 5163470734c20167148271381c549dadf30dc846 | /setup.py | 053623d127c358077949c873db5878f419fc85ee | [] | no_license | fabiommendes/uritool | 2c9a590da5bafa2d268fa1990df44803d760b7a4 | 224dfc33fa65f40e90b0ac61b27a1199439e1a28 | refs/heads/master | 2016-08-11T06:21:39.424548 | 2015-12-03T22:15:05 | 2015-12-03T22:15:05 | 46,719,564 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | import os
import setuptools
from setuptools import setup
VERSION = '0.1.1'
AUTHOR = 'Fábio Macêdo Mendes'
# Save version and author in the generated meta.py module
with open(os.path.join('src', 'uritool', 'meta.py'), 'w') as F:
F.write(
'# -*- coding: utf-8 -*-\n'
'# This is an autogenerated file! Do not edit.\n'
'__version__ = %r\n'
'__author__ = %r\n' % (VERSION, AUTHOR)
)
#
# Main configuration script
#
setup(
name='uritool',
version=VERSION,
description='Extract data from the URI Online Judge website at https://www.urionlinejudge.com.br/',
author='Fábio Macêdo Mendes',
author_email='[email protected]',
url='https://github.com/fabiommendes/uritool/',
long_description=(
r'''Extract data from URI website and grade the submissions by your
students.'''),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries',
],
package_dir={'': 'src'},
packages=setuptools.find_packages('src'),
license='GPL',
install_requires=['lxml', 'requests', 'pandas', 'numpy'],
#
# Scripts
#
entry_points={
'console_scripts': ['uritool = uritool.__main__:main'],
},
zip_safe=False,
)
| [
"[email protected]"
] | |
f6b76b989bff888993a075ab96a520c07bf1df26 | f54b7fa675df8d0e30c447d7212f7116f16b7e42 | /Kattis/electricalOutlets.py | 851ff22acf1fead374bcf7e4ac700dc3180133ac | [] | no_license | ManuLam/Competition | eccee2c3432d46e1e3d028f6ebc04285d396c85a | a01ea254243b4799bd8c44cd94c95e74786415af | refs/heads/master | 2023-06-27T16:56:58.845783 | 2021-08-01T10:26:04 | 2021-08-01T10:26:04 | 76,399,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | for _ in range(int(input())):
    outlets = list(map(int, input().split()))[1:]  # first number is just the strip count
    print(sum(outlets) - len(outlets) + 1)  # every strip except the one in the wall occupies an outlet
| [
"[email protected]"
] | |
d70f548e7da1df6761bc27d979d7635999271f32 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/e1e.py | b449d2237d988b5084620e8b0e9861e37cea25f5 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'e1E':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
bfcede3b1aea7278cb22179812be254f7ebd5abe | 87829066b8152bb71450827b1d0bafe0cda3efc8 | /collective/googleplus/interfaces/__init__.py | a9222fe9e3c29cc9424baabe5f7ccaf8c89df4f4 | [] | no_license | toutpt/collective.googleplus | fb7c53133bfb47d90dea202f8ae3207ac44a9409 | fd8aa1ad11ecb08d8f056fd77c168067da0ea589 | refs/heads/master | 2021-01-25T08:49:05.841177 | 2011-10-21T08:33:45 | 2011-10-21T08:33:45 | 2,619,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | from googlepluslayerlayer import IGooglePlusLayer
| [
"[email protected]"
] | |
0b1340655bdd9f4c9193f15b06aeb6d4345b1142 | 89cb758310a5319f4b4ce88ae4339e4a486cf3af | /app_controllers/infrastructure/kubernetes-deployments/services/nginx-modsecurity/02_service.py | 25f5a05fc161c277101dd375f26110e8330e008a | [] | no_license | cavalrytactics/securethebox-server-legacy | df843c4bea87dfee139cf2661f680af2f3b9af4e | ded8dc68bfcaceee6b626f01d2d03c606155da06 | refs/heads/master | 2022-04-04T13:21:28.813670 | 2020-02-08T20:56:04 | 2020-02-08T20:56:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | import sys
def writeConfig(**kwargs):
template = """
apiVersion: v1
kind: Service
metadata:
name: {serviceName}-{userName}
annotations:
external-dns.alpha.kubernetes.io/hostname: {serviceName}-{userName}.{clusterName}.securethebox.us
spec:
selector:
app: {serviceName}-{userName}
ports:
- name: http
targetPort: 80
port: 80
- name: cloudcmd
targetPort: 9000
port: 9000
"""
with open('./app_controllers/infrastructure/kubernetes-deployments/services/'+str(sys.argv[2])+'/02_service-'+str(sys.argv[1])+'-'+str(sys.argv[2])+'-'+str(sys.argv[3])+'.yml', 'w') as yfile:
yfile.write(template.format(**kwargs))
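# Example invocation (hypothetical names): running
#   python 02_service.py demo-cluster nginx-modsecurity alice
# writes 02_service-demo-cluster-nginx-modsecurity-alice.yml into the
# service directory, exposing ports 80 (http) and 9000 (cloudcmd) for the
# per-user service.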
if __name__ == "__main__":
writeConfig(clusterName=str(sys.argv[1]),serviceName=str(sys.argv[2]),userName=str(sys.argv[3])) | [
"[email protected]"
] | |
fa73f9959ecf4202b9cf34bd3d9e1fd01ac91e7b | 89c4a43a505df8fdf1f0d7386988c4896c2e631b | /google/ads/googleads/v6/enums/types/campaign_draft_status.py | 34bd35db00b4fe08cb76ba66e68970f75c6342cc | [
"Apache-2.0"
] | permissive | hurricanelennane/google-ads-python | a0a1fed690776a8bb2e81f637eb7eae10fb4992f | 310a488b6fdad9d5beea8fa4b166edce779a2511 | refs/heads/master | 2023-07-04T03:07:53.344466 | 2021-07-16T19:06:36 | 2021-07-16T19:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.enums",
marshal="google.ads.googleads.v6",
manifest={"CampaignDraftStatusEnum",},
)
class CampaignDraftStatusEnum(proto.Message):
r"""Container for enum describing possible statuses of a campaign
draft.
"""
class CampaignDraftStatus(proto.Enum):
r"""Possible statuses of a campaign draft."""
UNSPECIFIED = 0
UNKNOWN = 1
PROPOSED = 2
REMOVED = 3
PROMOTING = 5
PROMOTED = 4
PROMOTE_FAILED = 6
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
] | |
2366599f0e567f608fa33005f7ca20f30c303877 | 462c56e7454c97e0541588b9be66a4e216ea20fd | /119.pascals-triangle-ii.py | 7fd4a9b31ba4be874c9ab04d023f055a5b8a5048 | [] | no_license | LouisYLWang/leetcode_python | d5ac6289e33c5d027f248aa3e7dd66291354941c | 2ecaeed38178819480388b5742bc2ea12009ae16 | refs/heads/master | 2020-05-27T08:38:48.532000 | 2019-12-28T07:08:57 | 2019-12-28T07:08:57 | 188,549,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | #
# @lc app=leetcode id=119 lang=python3
#
# [119] Pascal's Triangle II
#
from typing import List


class Solution:
def getRow(self, rowIndex: int) -> List[int]:
if rowIndex == 0:
return [1]
if rowIndex == 1:
return [1,1]
res = [1]
priorRow = self.getRow(rowIndex - 1)
i = 0
while i <= rowIndex - 2:
res.append(priorRow[i] + priorRow[i+1])
i += 1
return res + [1]
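
# A minimal sanity check with illustrative inputs:
if __name__ == "__main__":
    solution = Solution()
    assert solution.getRow(0) == [1]
    assert solution.getRow(3) == [1, 3, 3, 1]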
| [
"[email protected]"
] | |
7f85211534fa866b3fccc650122149034106ce76 | 08056428e2e12c192a06ccf2b775df09263582dd | /arcade/examples/particle_fireworks.py | b40b57bd1b70238f28cf4c6cd58265775b52e0c0 | [
"MIT"
] | permissive | chris48s/arcade | 57d2c4f5986216748adc420d4d2f09153f96f417 | 930ecd8be2d47db1f65173c6f1afc00c0df545c8 | refs/heads/master | 2020-07-13T04:35:30.235743 | 2019-08-28T12:54:43 | 2019-08-28T12:54:43 | 204,991,051 | 0 | 0 | NOASSERTION | 2019-08-28T18:52:19 | 2019-08-28T17:52:18 | null | UTF-8 | Python | false | false | 13,870 | py | """
Particle Fireworks
Use a fireworks display to demonstrate "real-world" uses of Emitters and Particles
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.particle_fireworks
"""
import arcade
from arcade import Point, Vector
from arcade.utils import _Vec2 # bring in "private" class
from arcade.examples.frametime_plotter import FrametimePlotter
import os
import random
import pyglet
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Particle based fireworks"
LAUNCH_INTERVAL_MIN = 1.5
LAUNCH_INTERVAL_MAX = 2.5
TEXTURE = "images/pool_cue_ball.png"
RAINBOW_COLORS = (
arcade.color.ELECTRIC_CRIMSON,
arcade.color.FLUORESCENT_ORANGE,
arcade.color.ELECTRIC_YELLOW,
arcade.color.ELECTRIC_GREEN,
arcade.color.ELECTRIC_CYAN,
arcade.color.MEDIUM_ELECTRIC_BLUE,
arcade.color.ELECTRIC_INDIGO,
arcade.color.ELECTRIC_PURPLE,
)
SPARK_TEXTURES = [arcade.make_circle_texture(15, clr) for clr in RAINBOW_COLORS]
SPARK_PAIRS = [
[SPARK_TEXTURES[0], SPARK_TEXTURES[3]],
[SPARK_TEXTURES[1], SPARK_TEXTURES[5]],
[SPARK_TEXTURES[7], SPARK_TEXTURES[2]],
]
ROCKET_SMOKE_TEXTURE = arcade.make_soft_circle_texture(15, arcade.color.GRAY)
PUFF_TEXTURE = arcade.make_soft_circle_texture(80, (40, 40, 40))
FLASH_TEXTURE = arcade.make_soft_circle_texture(70, (128, 128, 90))
CLOUD_TEXTURES = [
arcade.make_soft_circle_texture(50, arcade.color.WHITE),
arcade.make_soft_circle_texture(50, arcade.color.LIGHT_GRAY),
arcade.make_soft_circle_texture(50, arcade.color.LIGHT_BLUE),
]
STAR_TEXTURES = [
arcade.make_soft_circle_texture(6, arcade.color.WHITE),
arcade.make_soft_circle_texture(6, arcade.color.PASTEL_YELLOW),
]
SPINNER_HEIGHT=75
def make_spinner():
spinner = arcade.Emitter(
center_xy=(SCREEN_WIDTH / 2, SPINNER_HEIGHT - 5),
emit_controller=arcade.EmitterIntervalWithTime(0.025, 2.0),
particle_factory=lambda emitter: arcade.FadeParticle(
filename_or_texture=random.choice(STAR_TEXTURES),
change_xy=(0, 6.0),
lifetime=0.2
)
)
spinner.change_angle = 16.28
return spinner
def make_rocket(emit_done_cb):
"""Emitter that displays the smoke trail as the firework shell climbs into the sky"""
rocket = RocketEmitter(
center_xy=(random.uniform(100, SCREEN_WIDTH - 100), 25),
emit_controller=arcade.EmitterIntervalWithTime(0.04, 2.0),
particle_factory=lambda emitter: arcade.FadeParticle(
filename_or_texture=ROCKET_SMOKE_TEXTURE,
change_xy=arcade.rand_in_circle((0.0, 0.0), 0.08),
scale=0.5,
lifetime=random.uniform(1.0, 1.5),
start_alpha=100,
end_alpha=0,
mutation_callback=rocket_smoke_mutator
),
emit_done_cb=emit_done_cb
)
rocket.change_x = random.uniform(-1.0, 1.0)
rocket.change_y = random.uniform(5.0, 7.25)
return rocket
def make_flash(prev_emitter):
"""Return emitter that displays the brief flash when a firework shell explodes"""
return arcade.Emitter(
center_xy=prev_emitter.get_pos(),
emit_controller=arcade.EmitBurst(3),
particle_factory=lambda emitter: arcade.FadeParticle(
filename_or_texture=FLASH_TEXTURE,
change_xy=arcade.rand_in_circle((0.0, 0.0), 3.5),
lifetime=0.15
)
)
def make_puff(prev_emitter):
"""Return emitter that generates the subtle smoke cloud left after a firework shell explodes"""
return arcade.Emitter(
center_xy=prev_emitter.get_pos(),
emit_controller=arcade.EmitBurst(4),
particle_factory=lambda emitter: arcade.FadeParticle(
filename_or_texture=PUFF_TEXTURE,
change_xy=(_Vec2(arcade.rand_in_circle((0.0, 0.0), 0.4)) + _Vec2(0.3, 0.0)).as_tuple(),
lifetime=4.0
)
)
class AnimatedAlphaParticle(arcade.LifetimeParticle):
"""A custom particle that animates between three different alpha levels"""
def __init__(
self,
filename_or_texture: arcade.FilenameOrTexture,
change_xy: Vector,
start_alpha: int = 0,
duration1: float = 1.0,
mid_alpha: int = 255,
duration2: float = 1.0,
end_alpha: int = 0,
center_xy: Point = (0.0, 0.0),
angle: float = 0,
change_angle: float = 0,
scale: float = 1.0,
mutation_callback=None,
):
super().__init__(filename_or_texture, change_xy, duration1 + duration2, center_xy, angle, change_angle, scale, start_alpha, mutation_callback)
self.start_alpha = start_alpha
self.in_duration = duration1
self.mid_alpha = mid_alpha
self.out_duration = duration2
self.end_alpha = end_alpha
def update(self):
super().update()
if self.lifetime_elapsed <= self.in_duration:
u = self.lifetime_elapsed / self.in_duration
self.alpha = arcade.lerp(self.start_alpha, self.mid_alpha, u)
else:
u = (self.lifetime_elapsed - self.in_duration) / self.out_duration
self.alpha = arcade.lerp(self.mid_alpha, self.end_alpha, u)
class RocketEmitter(arcade.Emitter):
"""Custom emitter class to add gravity to the emitter to represent gravity on the firework shell"""
def update(self):
super().update()
# gravity
self.change_y += -0.05
class FireworksApp(arcade.Window):
def __init__(self):
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
arcade.set_background_color(arcade.color.BLACK)
self.emitters = []
self.frametime_plotter = FrametimePlotter()
self.launch_firework(0)
arcade.schedule(self.launch_spinner, 4.0)
stars = arcade.Emitter(
center_xy=(0.0, 0.0),
emit_controller=arcade.EmitMaintainCount(20),
particle_factory=lambda emitter: AnimatedAlphaParticle(
filename_or_texture=random.choice(STAR_TEXTURES),
change_xy=(0.0, 0.0),
start_alpha=0,
duration1=random.uniform(2.0, 6.0),
mid_alpha=128,
duration2=random.uniform(2.0, 6.0),
end_alpha=0,
center_xy=arcade.rand_in_rect((0.0, 0.0), SCREEN_WIDTH, SCREEN_HEIGHT)
)
)
self.emitters.append(stars)
self.cloud = arcade.Emitter(
center_xy=(50, 500),
change_xy=(0.15, 0),
emit_controller=arcade.EmitMaintainCount(60),
particle_factory=lambda emitter: AnimatedAlphaParticle(
filename_or_texture=random.choice(CLOUD_TEXTURES),
change_xy=(_Vec2(arcade.rand_in_circle((0.0, 0.0), 0.04)) + _Vec2(0.1, 0)).as_tuple(),
start_alpha=0,
duration1=random.uniform(5.0, 10.0),
mid_alpha=255,
duration2=random.uniform(5.0, 10.0),
end_alpha=0,
center_xy=arcade.rand_in_circle((0.0, 0.0), 50)
)
)
self.emitters.append(self.cloud)
def launch_firework(self, delta_time):
self.frametime_plotter.add_event("launch")
launchers = (
self.launch_random_firework,
self.launch_ringed_firework,
self.launch_sparkle_firework,
)
random.choice(launchers)(delta_time)
pyglet.clock.schedule_once(self.launch_firework, random.uniform(LAUNCH_INTERVAL_MIN, LAUNCH_INTERVAL_MAX))
def launch_random_firework(self, delta_time):
"""Simple firework that explodes in a random color"""
rocket = make_rocket(self.explode_firework)
self.emitters.append(rocket)
def launch_ringed_firework(self, delta_time):
""""Firework that has a basic explosion and a ring of sparks of a different color"""
rocket = make_rocket(self.explode_ringed_firework)
self.emitters.append(rocket)
def launch_sparkle_firework(self, delta_time):
"""Firework which has sparks that sparkle"""
rocket = make_rocket(self.explode_sparkle_firework)
self.emitters.append(rocket)
def launch_spinner(self, delta_time):
"""Start the spinner that throws sparks"""
spinner1 = make_spinner()
spinner2 = make_spinner()
spinner2.angle = 180
self.emitters.append(spinner1)
self.emitters.append(spinner2)
def explode_firework(self, prev_emitter):
"""Actions that happen when a firework shell explodes, resulting in a typical firework"""
self.emitters.append(make_puff(prev_emitter))
self.emitters.append(make_flash(prev_emitter))
spark_texture = random.choice(SPARK_TEXTURES)
sparks = arcade.Emitter(
center_xy=prev_emitter.get_pos(),
emit_controller=arcade.EmitBurst(random.randint(30, 40)),
particle_factory=lambda emitter: arcade.FadeParticle(
filename_or_texture=spark_texture,
change_xy=arcade.rand_in_circle((0.0, 0.0), 9.0),
lifetime=random.uniform(0.5, 1.2),
mutation_callback=firework_spark_mutator
)
)
self.emitters.append(sparks)
def explode_ringed_firework(self, prev_emitter):
"""Actions that happen when a firework shell explodes, resulting in a ringed firework"""
self.emitters.append(make_puff(prev_emitter))
self.emitters.append(make_flash(prev_emitter))
spark_texture, ring_texture = random.choice(SPARK_PAIRS)
sparks = arcade.Emitter(
center_xy=prev_emitter.get_pos(),
emit_controller=arcade.EmitBurst(25),
particle_factory=lambda emitter: arcade.FadeParticle(
filename_or_texture=spark_texture,
change_xy=arcade.rand_in_circle((0.0, 0.0), 8.0),
lifetime=random.uniform(0.55, 0.8),
mutation_callback=firework_spark_mutator
)
)
self.emitters.append(sparks)
ring = arcade.Emitter(
center_xy=prev_emitter.get_pos(),
emit_controller=arcade.EmitBurst(20),
particle_factory=lambda emitter: arcade.FadeParticle(
filename_or_texture=ring_texture,
change_xy=arcade.rand_on_circle((0.0, 0.0), 5.0) + arcade.rand_in_circle((0.0, 0.0), 0.25),
lifetime=random.uniform(1.0, 1.6),
mutation_callback=firework_spark_mutator
)
)
self.emitters.append(ring)
def explode_sparkle_firework(self, prev_emitter):
"""Actions that happen when a firework shell explodes, resulting in a sparkling firework"""
self.emitters.append(make_puff(prev_emitter))
self.emitters.append(make_flash(prev_emitter))
spark_texture = random.choice(SPARK_TEXTURES)
sparks = arcade.Emitter(
center_xy=prev_emitter.get_pos(),
emit_controller=arcade.EmitBurst(random.randint(30, 40)),
particle_factory=lambda emitter: AnimatedAlphaParticle(
filename_or_texture=spark_texture,
change_xy=arcade.rand_in_circle((0.0, 0.0), 9.0),
start_alpha=255,
duration1=random.uniform(0.6, 1.0),
mid_alpha=0,
duration2=random.uniform(0.1, 0.2),
end_alpha=255,
mutation_callback=firework_spark_mutator
)
)
self.emitters.append(sparks)
def update(self, delta_time):
# prevent list from being mutated (often by callbacks) while iterating over it
emitters_to_update = self.emitters.copy()
# update cloud
if self.cloud.center_x > SCREEN_WIDTH:
self.cloud.center_x = 0
# update
for e in emitters_to_update:
e.update()
# remove emitters that can be reaped
to_del = [e for e in emitters_to_update if e.can_reap()]
for e in to_del:
self.emitters.remove(e)
self.frametime_plotter.end_frame(delta_time)
def on_draw(self):
arcade.start_render()
for e in self.emitters:
e.draw()
arcade.draw_lrtb_rectangle_filled(0, SCREEN_WIDTH, 25, 0, arcade.color.DARK_GREEN)
mid = SCREEN_WIDTH / 2
arcade.draw_lrtb_rectangle_filled(mid-2, mid+2, SPINNER_HEIGHT, 10, arcade.color.DARK_BROWN)
def on_key_press(self, key, modifiers):
if key == arcade.key.ESCAPE:
arcade.close_window()
def firework_spark_mutator(emitter: arcade.Emitter):
"""mutation_callback shared by all fireworks sparks"""
# gravity
emitter.change_y += -0.03
# drag
emitter.change_x *= 0.92
emitter.change_y *= 0.92
def rocket_smoke_mutator(emitter: arcade.Emitter):
emitter.scale = arcade.lerp(0.5, 3.0, emitter.lifetime_elapsed/emitter.lifetime_original)
# A Sprite's scale doesn't affect generated textures (ex: make_soft_circle_texture) or scale being animated over time.
# The fix below is copied from Sprite.update_animation().
# Bug may have been recorded here: https://github.com/pvcraven/arcade/issues/331
emitter.width = emitter._texture.width * emitter.scale
emitter.height = emitter._texture.height * emitter.scale
if __name__ == "__main__":
app = FireworksApp()
arcade.run()
app.frametime_plotter.show()
| [
"[email protected]"
] | |
98b26fe08f7c83923cba3d13f8204b817a4a9ce8 | 2581f2c98d497a6adf9bbb62730b02efea08cf80 | /stubs/scales/meter.pyi | cc7fd7bfd725f7e6a9b3eac07b4e7b0c4866789b | [] | no_license | drewp/rdfdb | 1ebbb5cf892fd86f6e3c571b94a97ecd07dd7340 | 8c71f02f989b2de1a4921640d1ca765e6d9efdb6 | refs/heads/master | 2021-04-27T00:31:22.493060 | 2019-08-09T06:15:15 | 2019-08-09T06:15:15 | 123,776,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | pyi | # Stubs for scales.meter (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from collections import UserDict
from greplin.scales import Stat
from typing import Any, Optional
TICKERS: Any
TICKER_THREAD: Any
class MeterStatDict(UserDict):
def __init__(self) -> None: ...
def __getitem__(self, item: Any): ...
def tick(self) -> None: ...
def mark(self, value: int = ...) -> None: ...
class MeterStat(Stat):
def __init__(self, name: Any, _: Optional[Any] = ...) -> None: ...
def __set__(self, instance: Any, value: Any) -> None: ...
class MeterDict(UserDict):
parent: Any = ...
instance: Any = ...
def __init__(self, parent: Any, instance: Any) -> None: ...
def __getitem__(self, item: Any): ...
class MeterDictStat(Stat): ...
| [
"[email protected]"
] | |
e829bbaaca6fdc6a724b41833d5f5934453f1b83 | 75e951dcf749f62f2a292774968fe95fc4a353c8 | /boa3/neo/__init__.py | d2af9ae09d866dbb6fe33b73fd8e4d6db9a71ff1 | [
"Apache-2.0"
] | permissive | jplippi/neo3-boa | e0a199d1ed2fa39abe09ebd3c013c360ca87f544 | 052be4adebb665113715bb80067d954f7ad85ad5 | refs/heads/development | 2022-08-19T10:17:43.610854 | 2020-05-25T20:30:42 | 2020-05-25T20:30:42 | 265,959,419 | 0 | 0 | Apache-2.0 | 2020-05-25T20:39:59 | 2020-05-21T21:54:24 | Python | UTF-8 | Python | false | false | 727 | py | def to_script_hash(data_bytes: bytes) -> bytes:
"""
Converts a data to a script hash.
:param data_bytes: data to hash.
:type data_bytes: bytearray or bytes
:return: the scripthash of the data
:rtype: bytes
"""
from boa3.neo import cryptography
return cryptography.hash160(data_bytes)
def to_hex_str(data_bytes: bytes) -> str:
"""
Converts bytes into its string hex representation.
:param data_bytes: data to represent as hex.
:type data_bytes: bytearray or bytes
:return: the hex representation of the data
:rtype: str
"""
if isinstance(data_bytes, bytes):
data_bytes = bytearray(data_bytes)
data_bytes.reverse()
return data_bytes.hex()
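

# Minimal usage sketch for to_hex_str (input bytes are illustrative):
if __name__ == '__main__':
    print(to_hex_str(b'\x01\x02\x03'))  # '030201' - bytes reversed, then hex-encoded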
| [
"[email protected]"
] | |
16851733ca7f1bda85d54a12b6eaaa4a3d14a0ce | e0b0abc1e8d442adb63cf0e5b8641f4185c5611c | /manage.py | f16ab303512e46b533318951b49e21403bd1a635 | [] | no_license | nramiscal/petDemo | f7d0c149941b2efabdb74a4e10717f540b6d7eee | c4d341105c1b1f14e868f198199fe37cf3687c8f | refs/heads/master | 2020-05-14T08:21:23.669095 | 2019-04-17T23:02:24 | 2019-04-17T23:02:24 | 181,721,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pets.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
399e58269bd4f66695f421807152aa52627ff652 | 82a9077bcb5a90d88e0a8be7f8627af4f0844434 | /google-cloud-sdk/lib/tests/unit/surface/storage/list_test.py | eb6c5f661193d257bacafa028af1ae6c5b203704 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | piotradamczyk5/gcloud_cli | 1ae2553595e569fad6ce84af62b91a7ee5489017 | 384ece11040caadcd64d51da74e0b8491dd22ca3 | refs/heads/master | 2023-01-01T23:00:27.858583 | 2020-10-21T04:21:23 | 2020-10-21T04:21:23 | 290,238,061 | 0 | 0 | null | 2020-10-19T16:43:36 | 2020-08-25T14:31:00 | Python | UTF-8 | Python | false | false | 2,158 | py | # Lint as: python3
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit Tests for the gcloud storage list command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base as calliope_base
from tests.lib import test_case
from tests.lib.surface.storage.gcs_api_unit_test_base import GcsApiUnitTestBase
class ListTestAlpha(GcsApiUnitTestBase):
"""Test cases for features in Alpha.
When a feature moves to beta, move the corresponding tests to a superclass of
this one where self.track = calliope_base.ReleaseTrack.BETA, details here:
go/gcloud-test-howto#how-to-test-multiple-release-tracks.
This will ensure that tests from past releases run for the alpha release.
"""
def PreSetUp(self):
self.track = calliope_base.ReleaseTrack.ALPHA
def SetUp(self):
self.bucket_name = 'bucket1'
self.object_names = ['file0', 'file1', 'asdf']
self.bucket_contents = self.messages.Objects(
items=[self.messages.Object(name=i) for i in self.object_names])
self.client.objects.List.Expect(
self.messages.StorageObjectsListRequest(bucket=self.bucket_name),
self.bucket_contents)
def test_list_bucket(self):
observed = self.Run('storage list gs://' + self.bucket_name)
observed_paths = [i['path'] for i in observed]
expected_paths = [
'gs://%s/%s' % (self.bucket_name, i) for i in self.object_names
]
self.assertCountEqual(observed_paths, expected_paths)
if __name__ == '__main__':
test_case.main()
| [
"[email protected]"
] | |
802caa784aff470ce4839a56761cdd49d43d388a | 06b2eed882d8d7034fb7c57b648d5aa37d7f617b | /pycharmproject/爬虫/菜鸡/gifduoxiancheng.py | 4c717acdfc70ad2136f7dce63eb021d9e85d0c52 | [] | no_license | 1974410167/python_projects | 558e2e7a4ea66b083ebd6d2f808b725e1bd153d6 | 81a97cbf41de12bdc3dbc46edf2931075ac4f758 | refs/heads/main | 2023-06-22T09:41:22.038620 | 2023-06-09T15:09:44 | 2023-06-09T15:09:44 | 355,036,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,002 | py | import requests
import queue
import time
from bs4 import BeautifulSoup
import threading
def g_url(q1):
number=0
for i in range(46300,46391):
if number==10:
break
for j in range(0,10):
if number==10:
break
url = "https://www.youquba.net/xieedongtaitu/2017/1217/"
url=url+str(i)+"_"+str(j)+".html"
headers={
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
}
r=requests.get(url,headers=headers)
r.encoding = "utf-8"
if r.status_code==200:
soup=BeautifulSoup(r.text,"lxml")
b=soup.select("p a img")
for n in b:
q1.put(n.get("src"))
number=number+1
print(f"获得第{number}个URL成功!")
if number==10:
break
print(f"获取{number}个URL完毕")
def down_gif(q1):
number2=0
root = "D://gif//"
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
}
    while True:
        m=q1.get()  # blocks until a URL (or the end-of-work sentinel) arrives
        if m is None:  # sentinel from g_url; `while not q1.empty()` could exit before the producer queued anything
            break
        try:
            r1=requests.get(m,headers=headers,timeout=0.3)
        except requests.exceptions.RequestException as e:  # `error` was never imported; catch requests' own exceptions
            print(e)
else:
if r1.status_code == 200:
path = root + m.split("/")[-1]
with open(path, "wb") as f:
f.write(r1.content)
number2=number2+1
print(f"写入第{number2}个文件成功!")
if __name__=="__main__":
q1=queue.Queue()
t1=threading.Thread(target=g_url,args=(q1,))
t2=threading.Thread(target=down_gif,args=(q1,))
t1.start()
t2.start()
t1.join()
t2.join()
| [
"[email protected]"
] | |
d646b8ec935a81c7baa643d281c1ad1485502308 | a9c0daa4a7b9a4d7341afcab270c5b5debb8c13f | /env/lib/python3.6/copyreg.py | e492049eb2a920d7955e40b850a43d8d3b20bc6c | [] | no_license | phamcong/alienator-plf | bad8c4e003fd189c43243b31ef2b975b6f154754 | ea65628af66fbca51f2248ceb4ba93f858dbddce | refs/heads/master | 2022-11-26T01:28:38.286261 | 2017-11-07T15:12:08 | 2017-11-07T15:12:08 | 109,412,097 | 0 | 1 | null | 2020-07-25T23:43:17 | 2017-11-03T15:30:22 | JavaScript | UTF-8 | Python | false | false | 50 | py | /Users/cuongpham/anaconda/lib/python3.6/copyreg.py | [
"[email protected]"
] | |
2e7a302f37e01983e5cce2dbf5631abfb32dbcac | e811662c890217c77b60aa2e1295dd0f5b2d4591 | /pinduoduo.py | 1e343affa54327a68a706b640607c00d12be0d0f | [] | no_license | rewonderful/MLC | 95357f892f8cf76453178875bac99316c7583f84 | 7012572eb192c29327ede821c271ca082316ff2b | refs/heads/master | 2022-05-08T05:24:06.929245 | 2019-09-24T10:35:22 | 2019-09-24T10:35:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,529 | py | # #!/usr/bin/env python
# # _*_ coding:utf-8 _*_
# import random
# # def partition(nums,l,r):
# # pivot = nums[l]
# # while l<r:
# # while l<r and nums[r] >= pivot:
# # r -= 1
# # nums[l] = nums[r]
# # while l < r and nums[l] < pivot:
# # l += 1
# # nums[r] = nums[l]
# # nums[l] = pivot
# # return l
#
#
# def quick_sort(nums,lo,hi):
# if lo < hi:
# l ,r = lo,hi
# pivot = nums[l]
# while l<r:
# while l<r and nums[r] >= pivot:
# r -= 1
# nums[l] = nums[r]
# while l < r and nums[l] < pivot:
# l += 1
# nums[r] = nums[l]
# nums[l] = pivot
# quick_sort(nums,lo,l-1)
# quick_sort(nums,l+1,hi)
# if __name__ == '__main__':
# n = 50
# print("BEFORE")
# nums = [ random.randrange(n) for _ in range(n//2)]
# print(nums)
# quick_sort(nums,0,len(nums)-1)
# print('AFTER')
# print(nums)
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
# def binarySearch(nums,target):
# low = 0
# high = len(nums)-1
#
# while(low <= high):
# mid = int((low + high) / 2)
#
# if target == nums[mid]:
# return mid
# if target > nums[mid]:
# low = mid+1
# else:
# high = mid - 1
# return 0
# def binarySearch(nums,target):
# lo ,hi = 0,len(nums)-1
# while lo <= hi:
# mid = (lo+hi)//2
#         if nums[mid] == target:
# return mid
# elif nums[mid] < target:
# lo = mid + 1
# else:
# hi = mid - 1
# return -1
#
#
# if __name__ == '__main__':
# nums = [1,2,3,4,5,9,11,13,222,333,444,555]
# target = 5
# print(binarySearch(nums,target))
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
def merge(nums):
if len(nums) < 2:
return nums
mid = len(nums)//2
left_part = merge(nums[:mid])
right_part = merge(nums[mid:])
return mergesort(left_part,right_part)
def mergesort(left,right):
i ,j = 0,0
result = []
while i < len(left) and j < len(right):
if left[i] < right[j]:
result.append(left[i])
i += 1
else:
result.append(right[j])
j += 1
if i < len(left):
result.extend(left[i:])
if j < len(right):
result.extend(right[j:])
return result
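# Top-down merge sort: O(n log n) time, O(n) auxiliary space.
# e.g. merge([3, 1, 2]) -> [1, 2, 3]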
if __name__ == '__main__':
print(merge([9,8,7,6,5,4,3,2,1,0]))
#print(mergesort([1,5,7],[2,4,6]))
| [
"[email protected]"
] | |
b4f8fae687ffc6edc12f388af9fc9e0d9dd49822 | f28adfe93e04efb3d915965bc5339b25324f5d8c | /19_df2dict.py | 31929424b98dc2cf9ce62980489b56e36b778983 | [] | no_license | aihill/start_Pandas | 57059f8fec9b0fc74d39eaae2744dda0a82e8b08 | ac1ee9a67f6fb05841258dbfcfe2c980059301c1 | refs/heads/master | 2022-04-20T17:03:19.167668 | 2017-08-28T03:09:06 | 2017-08-28T03:09:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | #load pandas as pd
import pandas as pd
#raw_data
raw_data = {'name':['song','park','kim','na','won','jun','song'],
'residence':['seoul','seoul','busan','busan','incheon','incheon','seoul'],
'univ':['seoul','chungang','korea','hanyang','seoul','kaist','seoul'],
'score':[90,99,96,78,70,100,90],
'student_number':[2000,2001,2002,2003,2004,2005,2000],
'sex':['male','male','female','female','male','male','male']}
df = pd.DataFrame(raw_data, columns= ['student_number','name','sex','residence','univ','score'])
'''
student_number name sex residence univ score
0 2000 song male seoul seoul 90
1 2001 park male seoul chungang 99
2 2002 kim female busan korea 96
3 2003 na female busan hanyang 78
4 2004 won male incheon seoul 70
5 2005 jun male incheon kaist 100
6 2000 song male seoul seoul 90
'''
#df2dict
#orient: dict
print(df.to_dict())
'''
{'student_number': {0: 2000, 1: 2001, 2: 2002, 3: 2003, 4: 2004, 5: 2005, 6: 2000},
'name': {0: 'song', 1: 'park', 2: 'kim', 3: 'na', 4: 'won', 5: 'jun', 6: 'song'},
'sex': {0: 'male', 1: 'male', 2: 'female', 3: 'female', 4: 'male', 5: 'male', 6: 'male'},
'residence': {0: 'seoul', 1: 'seoul', 2: 'busan', 3: 'busan', 4: 'incheon', 5: 'incheon', 6: 'seoul'},
'univ': {0: 'seoul', 1: 'chungang', 2: 'korea', 3: 'hanyang', 4: 'seoul', 5: 'kaist', 6: 'seoul'},
'score': {0: 90, 1: 99, 2: 96, 3: 78, 4: 70, 5: 100, 6: 90}}
'''
#orienct:list
print(df.to_dict('list'))
'''
{'student_number': [2000, 2001, 2002, 2003, 2004, 2005, 2000],
'name': ['song', 'park', 'kim', 'na', 'won', 'jun', 'song'],
'sex': ['male', 'male', 'female', 'female', 'male', 'male', 'male'],
'residence': ['seoul', 'seoul', 'busan', 'busan', 'incheon', 'incheon', 'seoul'],
'univ': ['seoul', 'chungang', 'korea', 'hanyang', 'seoul', 'kaist', 'seoul'],
'score': [90, 99, 96, 78, 70, 100, 90]}
''' | [
"[email protected]"
] | |
15ed0f8428b2d1f891f40718268c67e0d7c67e20 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/2278.py | 9906776748b33be02c1af34a2e169e7c60b0f786 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | #!/usr/bin/env python3
ip=open("A2.in",'r')
no = ip.readline()
no = int(no)
case_no = 0;
for line in ip:
#print line
case_no+=1
line = line.split(' ',1)
smax = int(line[0])
shy = line[1]
#print smax
minf = 0
count = int(shy[0])
for i in range(1,smax+1):
x=int(shy[i])
if(count<i):
extraf=i-count
count+=extraf
minf+=extraf
count+=x
print "Case #" + str(case_no) + ": " + str(minf)
| [
"[email protected]"
] | |
d35d195c09580ae5b7a11b85392eb618093b5bd9 | b5fbc01deb2060b2222f885fca0433844a9e7cd1 | /web/lib/python3.6/site-packages/coreapi/codecs/python.py | d0bebf645b79b6bea9fa954a1c5cc1ec426eb4c1 | [] | no_license | Carlosdher/reposicao | 50973b15f8a2bd3a5a6b83b06efe0050f612bb83 | 71ef93e694888e54c79e98e8568c3417ee82ec96 | refs/heads/master | 2020-03-18T04:13:59.493126 | 2018-08-02T13:06:55 | 2018-08-02T13:06:55 | 134,277,105 | 2 | 0 | null | 2018-07-27T19:20:36 | 2018-05-21T14:01:26 | Python | UTF-8 | Python | false | false | 2,784 | py | # Note that `DisplayCodec` is deliberately omitted from the documentation,
# as it is considered an implementation detail.
# It may move into a utility function in the future.
from __future__ import unicode_literals
from coreapi.codecs.base import BaseCodec
from coreapi.document import Document, Link, Array, Object, Error, Field
def _to_repr(node):
if isinstance(node, Document):
content = ', '.join([
'%s: %s' % (repr(key), _to_repr(value))
for key, value in node.items()
])
return 'Document(url=%s, title=%s, content={%s})' % (
repr(node.url), repr(node.title), content
)
elif isinstance(node, Error):
content = ', '.join([
'%s: %s' % (repr(key), _to_repr(value))
for key, value in node.items()
])
return 'Error(title=%s, content={%s})' % (
repr(node.title), content
)
elif isinstance(node, Object):
return '{%s}' % ', '.join([
'%s: %s' % (repr(key), _to_repr(value))
for key, value in node.items()
])
elif isinstance(node, Array):
return '[%s]' % ', '.join([
_to_repr(value) for value in node
])
elif isinstance(node, Link):
args = "url=%s" % repr(node.url)
if node.action:
args += ", action=%s" % repr(node.action)
if node.encoding:
args += ", encoding=%s" % repr(node.encoding)
if node.transform:
args += ", transform=%s" % repr(node.transform)
if node.description:
args += ", description=%s" % repr(node.description)
if node.fields:
fields_repr = ', '.join(_to_repr(item) for item in node.fields)
args += ", fields=[%s]" % fields_repr
return "Link(%s)" % args
elif isinstance(node, Field):
args = repr(node.name)
if not node.required and not node.location:
return args
if node.required:
args += ', required=True'
if node.location:
args += ', location=%s' % repr(node.location)
if node.description:
args += ', description=%s' % repr(node.description)
return 'Field(%s)' % args
return repr(node)
class PythonCodec(BaseCodec):
"""
A Python representation of a Document, for use with '__repr__'.
"""
media_type = 'text/python'
def encode(self, document, **options):
# Object and Array only have the class name wrapper if they
# are the outermost element.
if isinstance(document, Object):
return 'Object(%s)' % _to_repr(document)
elif isinstance(document, Array):
return 'Array(%s)' % _to_repr(document)
return _to_repr(document)
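

# Minimal usage sketch (document contents are illustrative):
if __name__ == '__main__':
    doc = Document(url='http://example.com', title='Example',
                   content={'search': Link(url='/search/', action='get')})
    print(PythonCodec().encode(doc))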
| [
"[email protected]"
] | |
beddfd8cd214a7b078fb1134232bc8c211a8892c | 1c43f97456f3cab00067932dfbd971c22e91267e | /rqt_yn_btn/setup.py | ebca86f4e2fab0109e87d3f3f2bb239780f922e3 | [] | no_license | m1a1k0o/2014-semi | 661cc2692d70eadaed6a4d11ef85a9ac20914cb7 | d57f088bf6b1eeed7e5d14b42034a70517281293 | refs/heads/master | 2021-01-15T21:15:26.915602 | 2015-03-25T09:17:36 | 2015-03-25T09:17:36 | 32,906,069 | 0 | 0 | null | 2015-03-26T03:44:15 | 2015-03-26T03:44:15 | null | UTF-8 | Python | false | false | 346 | py | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['rqt_yn_btn'],
package_dir={'': 'src'},
requires=['std_msgs', 'rospy']
)
setup(**setup_args)
| [
"[email protected]"
] | |
af41e25e54049b6a7d90ced32ac25553bd20a520 | 1fbb4d511dd15490ab70bd85cc404dab06a7e37c | /model.py | 0bef3b02a2b12da4e4510d201b0445ac1d872b5a | [] | no_license | aboerzel/udacity-deep-reinforcement-learning-p2-continuous-control | 7f32f3cd444d3ed5e4f255c4bc9642e6469209bc | 0416434749b7758187a2306833d328eba9150668 | refs/heads/master | 2023-03-01T18:29:04.285342 | 2021-02-06T11:15:32 | 2021-02-06T11:15:32 | 333,823,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
    """Uniform fan-in initialization bounds, as in the DDPG paper (Lillicrap et al., 2015)."""
    fan_in = layer.weight.data.size()[0]
    lim = 1. / np.sqrt(fan_in)
    return -lim, lim
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=200, fc2_units=200): # fc1_units=400, fc2_units=300
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.bn1 = nn.BatchNorm1d(state_size) # BN added
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = self.bn1(state) # BN added
x = F.leaky_relu(self.fc1(x))
x = F.leaky_relu(self.fc2(x))
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
return torch.tanh(self.fc3(x))
class Critic(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=200, fc2_units=200): # fc1_units=400, fc2_units=300
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
"""
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
self.bn1 = nn.BatchNorm1d(state_size) # BN added
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
xs = self.bn1(state) # BN added
# xs = F.relu(self.fcs1(xs))
xs = F.leaky_relu(self.fcs1(xs))
x = torch.cat((xs, action), dim=1)
# x = F.relu(self.fc2(x))
x = F.leaky_relu(self.fc2(x))
return self.fc3(x)
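

# Minimal shape check (state/action sizes below are illustrative assumptions):
if __name__ == '__main__':
    actor = Actor(state_size=33, action_size=4, seed=0)
    critic = Critic(state_size=33, action_size=4, seed=0)
    states = torch.randn(5, 33)
    actions = actor(states)
    print(actions.shape, critic(states, actions).shape)  # (5, 4) and (5, 1)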
| [
"[email protected]"
] | |
ce8b9fcf3d54e14ec62b9e66411670117ff29710 | 66bc5c67523126170f5a14370736e2e15dbc6c13 | /dynamic_modeling/dataset.py | c96d5bd1bb71576da1d5a87b7d433c2a71460f12 | [] | no_license | windweller/smartbot_offline | 5e69a576753bff35af59b1cb78da8958fa137133 | 4e49e5e8e5a3add88d81c662d0d91388e10fc646 | refs/heads/main | 2023-07-01T18:00:40.937260 | 2021-08-05T00:28:44 | 2021-08-05T00:28:44 | 334,787,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,963 | py | import os
import copy
import pickle
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
from torch.utils.data import Dataset, DataLoader
def load_data(data_path):
    print('Loading file...')
    return pd.read_csv(data_path)
# ==== State-model ====
# One Markov Data Loader
class MarkovTutorStates(Dataset):
def __init__(self, data_path, state_ids_path, include_correctness=False):
print('Loading file...')
data = pd.read_csv(data_path)
self.data = data
self.include_correctness = include_correctness
self.action_vocab = ['', 'tell', 'elicit']
self.state_vocab = json.load(open(state_ids_path))
self.KC_list = [1, 14, 20, 21, 22, 24, 27, 28]
self.kc_used_headers = [f'KC_{i}_used' for i in self.KC_list]
self.kc_pretest_headers = [f'Pre_test_KC_{i}_score' for i in self.KC_list]
self.kc_cum_correct_headers = [f'KC{i}_correct_cum' for i in self.KC_list]
self.kc_cum_incorrect_headers = [f'KC{i}_incorrect_cum' for i in self.KC_list]
if include_correctness:
self.feats = self.data[self.kc_used_headers + self.kc_pretest_headers + self.kc_cum_correct_headers + \
self.kc_cum_incorrect_headers]
else:
self.feats = self.data[self.kc_used_headers + self.kc_pretest_headers]
def __len__(self):
return len(self.data)
def __getitem__(self, index):
# this really speaks to the advantage of having
# a feat data matrix pre-processed
row = self.data.iloc[index]
# feats: {action, prev_state, KCs, pre-test KCs}
# 8 + 8 = 16
feats = self.feats.iloc[index]
feats = np.array(feats)
target = self.state_vocab.index(row['Next_state'])
action = row['Action']
if pd.isna(action):
action = ''
item = dict(action=self.action_vocab.index(action),
state=self.state_vocab.index(row['State_id']),
feats=torch.FloatTensor(feats),
targets=target)
return item
def dset_args(self):
"""
Return a dict of args that models might need to know e.g. input dimension of features.
This dictionary will be available in the model config.
"""
if self.include_correctness:
return dict(feat_dim=32)
else:
return dict(feat_dim=16) # self.feat.shape[1]
# One Traj Data Loader
# TODO: add final score
# TODO: final score needs an additional loading thingy
# TODO: but adding it shouldn't be too difficult
# TODO: it's just a different prediction target
class TrajTutorState(Dataset):
def __init__(self, data_path, state_ids_path, nlg_score_path="./data/nlg_score.csv",
include_correctness=False,
include_answer_pred=False, pred_target='post_test'):
print('Loading file...')
data = pd.read_csv(data_path)
self.data = data
# load scores
nlg_score = pd.read_csv(nlg_score_path)
nlg_score['Student_id'] = nlg_score['student_id'].map(lambda x: 'Exp' + x)
self.nlg_score = nlg_score.drop(columns='student_id')
self.pred_target = pred_target
unique_uids = np.unique(data['Student_id'])
self.unique_uids = unique_uids
self.include_correctness = include_correctness
self.action_vocab = ['', 'tell', 'elicit']
self.state_vocab = json.load(open(state_ids_path))
self.correctness_vocab = [0, 1, -1] # -1 means we don't need to predict correctness
self.KC_list = [1, 14, 20, 21, 22, 24, 27, 28]
self.kc_used_headers = [f'KC_{i}_used' for i in self.KC_list]
self.kc_pretest_headers = [f'Pre_test_KC_{i}_score' for i in self.KC_list]
self.kc_cum_correct_headers = [f'KC{i}_correct_cum' for i in self.KC_list]
self.kc_cum_incorrect_headers = [f'KC{i}_incorrect_cum' for i in self.KC_list]
if include_correctness:
self.feats = self.data[self.kc_used_headers + self.kc_pretest_headers + self.kc_cum_correct_headers + \
self.kc_cum_incorrect_headers]
else:
self.feats = self.data[self.kc_used_headers] # + self.kc_pretest_headers
self.feats = self.feats.to_numpy().astype(np.dtype('float32'))
def __len__(self):
return len(self.unique_uids)
def __getitem__(self, index):
user_id = self.unique_uids[index]
df = self.data[self.data['Student_id'] == user_id]
# we grab everything
indices = np.where(self.data['Student_id'] == user_id)[0]
feats = self.feats[indices]
seq_len = feats.shape[0]
# feats: {action, prev_state, KCs, pre-test KCs}
# 8 + 8 = 16
targets = df['Next_state'].apply(self.state_vocab.index)
actions = df['Action'].fillna(value="").apply(self.action_vocab.index)
states = df['State_id'].apply(self.state_vocab.index)
# we add answer correctness prediction in here for the joint task
# otherwise we can ignore this
# add a mask, only predict under "elicit"
# answer prediction masks
answer_correctness = df['Correctness'].apply(self.correctness_vocab.index)
# we take all prediction targets (whether it's NLG or raw post-test score)
stu_id = user_id
# kc_list - 1 is to line up with pandas row index
perf = self.nlg_score[self.nlg_score['Student_id'] == stu_id].iloc[np.array(self.KC_list)-1]
scores = perf[self.pred_target].to_numpy()
pre_scores = perf['pre_test'].to_numpy()
item = dict(user_id=user_id,
action=torch.LongTensor(np.asarray(actions)),
state=torch.LongTensor(np.asarray(states)),
feats=torch.FloatTensor(feats),
targets=torch.LongTensor(np.asarray(targets)),
answer_correctness=torch.LongTensor(np.asarray(answer_correctness)),
answer_masks=torch.LongTensor(np.asarray(actions) == self.action_vocab.index("elicit")),
seq_len=int(seq_len),
score_targets=torch.FloatTensor(scores),
pre_test_scores=torch.FloatTensor(pre_scores))
return item
def dset_args(self):
"""
Return a dict of args that models might need to know e.g. input dimension of features.
This dictionary will be available in the model config.
"""
if self.include_correctness:
return dict(feat_dim=32)
else:
return dict(feat_dim=self.feats.shape[1])
def pad_tensor(A, num_pad, fill=0):
shape = A.shape
if len(shape) > 1:
p_shape = copy.deepcopy(list(shape))
p_shape[0] = num_pad
P = torch.zeros(*p_shape) + fill
else:
P = torch.zeros(num_pad) + fill
A = torch.cat([A, P], dim=0)
return A
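# e.g. pad_tensor(torch.ones(3), 2) -> tensor([1., 1., 1., 0., 0.])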
def tutor_collate_fn(batch):
u_ids = [b['user_id'] for b in batch]
seq_lens = [b['seq_len'] for b in batch]
max_seq_len = max(seq_lens)
seq_lens = torch.LongTensor(seq_lens)
actions = []
states = []
feats = []
masks = []
targets = []
answers = []
answer_masks = []
score_targets = []
pre_test_scores = []
for row in batch:
seq_len_i = row['seq_len']
pad_len = max_seq_len - seq_len_i
mask_i = [1 for _ in range(seq_len_i)] + [0 for _ in range(pad_len)]
mask_i = torch.LongTensor(mask_i)
action_i = pad_tensor(row['action'], pad_len)
state_i = pad_tensor(row['state'], pad_len)
feats_i = pad_tensor(row['feats'], pad_len)
targets_i = pad_tensor(row['targets'], pad_len)
answer_corr_i = pad_tensor(row['answer_correctness'], pad_len)
answer_mask_i = pad_tensor(row['answer_masks'], pad_len) # this only works if fill=0, otherwise error
actions.append(action_i)
states.append(state_i)
feats.append(feats_i)
targets.append(targets_i)
masks.append(mask_i)
answers.append(answer_corr_i)
answer_masks.append(answer_mask_i)
score_targets.append(row['score_targets'])
pre_test_scores.append(row['pre_test_scores'])
score_targets = torch.stack(score_targets)
pre_test_scores = torch.stack(pre_test_scores)
actions = torch.stack(actions)
states = torch.stack(states)
targets = torch.stack(targets)
feats = torch.stack(feats)
masks = torch.stack(masks)
answers = torch.stack(answers)
answer_masks = torch.stack(answer_masks)
batch = dict(user_id=u_ids,
action=actions,
state=states,
feats=feats,
targets=targets,
seq_len=seq_lens,
masks=masks,
answers=answers,
action_masks=answer_masks,
score_targets=score_targets,
pre_test_scores=pre_test_scores)
return batch
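

# Sketch of wiring the trajectory dataset through the padding collate_fn
# (file paths here are assumptions):
#   loader = DataLoader(
#       TrajTutorState('dynamics_dataset_train.csv', 'state_ids.json'),
#       batch_size=4, collate_fn=tutor_collate_fn)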
# ===== Correctness Tracking =====
# TODO: add an option to filter out cases like 'tell' or no action
class MarkovTutorAnswers(Dataset):
def __init__(self, data_path, state_ids_path):
print('Loading file...')
data = pd.read_csv(data_path)
self.data = data
self.action_vocab = ['', 'tell', 'elicit']
self.state_vocab = json.load(open(state_ids_path))
self.KC_list = [1, 14, 20, 21, 22, 24, 27, 28]
self.correctness_vocab = [0, 1, -1] # -1 means we don't need to predict correctness
self.kc_used_headers = [f'KC_{i}_used' for i in self.KC_list]
self.kc_pretest_headers = [f'Pre_test_KC_{i}_score' for i in self.KC_list]
self.kc_cum_correct_headers = [f'KC{i}_correct_cum' for i in self.KC_list]
self.kc_cum_incorrect_headers = [f'KC{i}_incorrect_cum' for i in self.KC_list]
self.feats = self.data[self.kc_used_headers + self.kc_pretest_headers + self.kc_cum_correct_headers + \
self.kc_cum_incorrect_headers]
# TODO: we could use a mask here too...but too complicated and not necessary
def __len__(self):
return len(self.data)
def __getitem__(self, index):
# this really speaks to the advantage of having
# a feat data matrix pre-processed
row = self.data.iloc[index]
# feats: {action, prev_state, KCs, pre-test KCs}
# 8 + 8 = 16
feats = self.feats.iloc[index]
feats = np.array(feats)
target = self.correctness_vocab.index(int(row['Correctness']))
action = row['Action']
if pd.isna(action):
action = ''
item = dict(action=self.action_vocab.index(action),
state=self.state_vocab.index(row['State_id']),
feats=torch.FloatTensor(feats),
action_masks=action == 'elicit', # elicit!
targets=target)
return item
def dset_args(self):
"""
Return a dict of args that models might need to know e.g. input dimension of features.
This dictionary will be available in the model config.
"""
return dict(feat_dim=32)
if __name__ == '__main__':
DATA_DIR = '/data/anie/offline_rl/data/'
print("========== State Dataset =========")
dset = MarkovTutorStates(os.path.join(DATA_DIR, 'dynamics_dataset_train.csv'),
os.path.join(DATA_DIR, 'state_ids.json'))
dl = DataLoader(dset, batch_size=8)
for dp in dl:
print(dp)
break
print("========== Answer Dataset =========")
dset = MarkovTutorAnswers(os.path.join(DATA_DIR, 'dynamics_dataset_train.csv'),
os.path.join(DATA_DIR, 'state_ids.json'))
dl = DataLoader(dset, batch_size=8)
for dp in dl:
print(dp)
break
| [
"[email protected]"
] | |
292073a7cb374caacd4a351d4ed2d1ae9d8195b8 | 068d271e241d8cdb46dbf4243166e4b8ee7025b2 | /day08/homework/core/certification.py | 7e34aeb34cbafe6aab843acb6b2657e232b577e5 | [] | no_license | caiqinxiong/python | f6e226e76cb62aac970bcfbcb6c8adfc64858b60 | 9029f6c528d2cb742b600af224e803baa74cbe6a | refs/heads/master | 2023-05-26T19:41:34.911885 | 2020-05-15T09:02:08 | 2020-05-15T09:02:08 | 195,261,757 | 1 | 0 | null | 2021-06-10T23:33:33 | 2019-07-04T15:01:42 | JavaScript | UTF-8 | Python | false | false | 3,740 | py | # -*- coding: utf-8 -*-
__author__ = 'caiqinxiong_cai'
# 2019/8/21 10:37
import pickle
import hashlib
from conf import settings as ss
from core.log import Log as log
class Certification:
    '''Login/authentication helper.'''
def __init__(self):
pass
    @staticmethod  # static method: takes no self, cannot touch class or instance state; callable as C.f() or C().f()
    def written_information(file_name, content, mode='ab'):
        '''Append one record to the file with pickle.'''
with open('%s' % file_name, mode='%s' % mode) as f:
pickle.dump(content, f)
@staticmethod
def read_information(file_name):
        '''Read records back; yields (key, value) pairs.'''
try:
with open('%s' % file_name, mode='rb') as f:
while True:
try:
r = pickle.load(f)
for k, v in r.__dict__.items():
                            yield k, v  # yield as a generator to keep memory use low
except EOFError:
break
        except Exception:  # file missing or unreadable
            return ("", "")
@staticmethod
def change_hashlib(password):
        '''Hash a plaintext password with MD5.'''
md5 = hashlib.md5()
md5.update(password.encode('utf-8'))
ret = md5.hexdigest()
return ret
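    # e.g. Certification.change_hashlib('123456') -> 'e10adc3949ba59abbe56e057f20f883e'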
    @classmethod  # class method: receives cls (the class itself) and can only reach class-level attributes, not instance state
    def check_user(cls, user, name, kind):
        '''Verify a non-admin account of the given kind (teacher/student).'''
for k, v in name:
if 'username' == k and user == v.split(':')[-1].strip():
                passwd = cls.change_hashlib(input('Enter password: ').strip())
if passwd == name.__next__()[-1].split(':')[-1].strip():
return user, passwd, name.__next__()[-1], name.__next__()[-1], name.__next__()[-1], kind
else:
                    log.warning('Password check failed!')
return False
    @property  # expose the method as a read-only attribute; only works because it takes no arguments besides self
    def login(self):
        '''Interactive login with at most three attempts.'''
        for i in range(3):  # three attempts
            user = input('Enter username: ').strip()
            if user in ss.admin_dict:  # check admin accounts first; there are always fewer admins than regular users
                passwd = self.change_hashlib(input('Enter admin password: ').strip())
                if ss.admin_dict[user] == passwd:
                    print("*" * 25 + '\nLogin successful! Administrator %s!' % user)
return user
else:
                    log.warning('Admin password check failed!')
continue
            # teacher account check
            teacher = Certification().read_information(ss.teacher_file)  # read the teacher account file; returns a generator
ret = self.check_user(user, teacher, 'teacher')
if ret:
print("*" * 25 + '\n登录成功!%s讲师!' % user)
return ret
            # student account check
            student = Certification().read_information(ss.student_file)  # read the student account file; returns a generator
ret = self.check_user(user, student, 'student')
if ret:
print("*" * 25 + '\n登录成功!%s同学!' % user)
return ret
else:
                print('Account does not exist!')
        else:
            log.warning('You have used up your 3 attempts. Goodbye!')
return False
if __name__ == '__main__':
print(Certification().login)
| [
"[email protected]"
] | |
7284c0b3e3f5f25dae2144460b7e2736b30e6aea | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03326/s393771722.py | df69bc9b8b591bef556ca44e75fdf6235600e367 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | N,M=map(int,input().split())
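# |X|+|Y|+|Z| equals the maximum over the 8 sign patterns s of s·(X, Y, Z), so:
# for each pattern, take the M largest values of ±a±b±c and keep the best total.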
ans1=[]
ans2=[]
ans3=[]
ans4=[]
ans5=[]
ans6=[]
ans7=[]
ans8=[]
ANS1=0
ANS2=0
ANS3=0
ANS4=0
ANS5=0
ANS6=0
ANS7=0
ANS8=0
for i in range(N):
A,B,C=map(int,input().split())
ans1.append(A+B+C)
ans2.append(-A+B+C)
ans3.append(A-B+C)
ans4.append(A+B-C)
ans5.append(-A-B+C)
ans6.append(A-B-C)
ans7.append(-A+B-C)
ans8.append(-A-B-C)
re_ans1=sorted(ans1)
re_ans2=sorted(ans2)
re_ans3=sorted(ans3)
re_ans4=sorted(ans4)
re_ans5=sorted(ans5)
re_ans6=sorted(ans6)
re_ans7=sorted(ans7)
re_ans8=sorted(ans8)
for j in range(1,M+1):
ANS1+=re_ans1[(-1)*j]
ANS2+=re_ans2[(-1)*j]
ANS3+=re_ans3[(-1)*j]
ANS4+=re_ans4[(-1)*j]
ANS5+=re_ans5[(-1)*j]
ANS6+=re_ans6[(-1)*j]
ANS7+=re_ans7[(-1)*j]
ANS8+=re_ans8[(-1)*j]
print(max(ANS1,ANS2,ANS3,ANS4,ANS5,ANS6,ANS7,ANS8)) | [
"[email protected]"
] | |
caa596988026c177b5d1631eda972783f68d471b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_fuzes.py | 8286425583021c697aa4a9041dbe2357f3c0b7ed | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
#calss header
class _FUZES():
def __init__(self,):
self.name = "FUZES"
        self.definitions = ['fuze']  # was the bare name `fuze`, which raised NameError
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['fuze']
| [
"[email protected]"
] | |
9006b74c31a0bbba91bf577dd8129129896bca4b | d74ccf6290b7acb0011fd9b9132cd8beac0bd9d3 | /back/movies/views.py | e3db8ef93368b3d5064833a71fab135bd04f821a | [] | no_license | gaberani/final_netflix | a0687c9cec9157712c9fe2a8627d3624e5fe00b6 | 637016fd6a0c589f1ff96ed5e9225deffc8f18cb | refs/heads/master | 2022-11-09T10:42:22.460795 | 2020-06-21T00:30:21 | 2020-06-21T00:30:21 | 272,981,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,769 | py | from django.shortcuts import render,get_object_or_404,get_list_or_404
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from .models import Movie, Comment, Genre
from django.db.models import Q,Avg,Sum
from .serializers import MovieSerializer, MovieListSerializer, CommentSerializer, GenreSerializer
# Get All Movies
@api_view(['GET'])
def index(request):
    movies = Movie.objects.all()
    serializer = MovieListSerializer(movies, many=True)
return Response(serializer.data)
# Get Detail Movie
@api_view(['GET'])
def detail(request, movie_pk):
movie = get_object_or_404(Movie, pk=movie_pk)
serializer=MovieSerializer(movie)
return Response(serializer.data)
# Register Comments
@api_view(['POST'])
# @permission_classes([IsAuthenticated])
def create(request,movie_pk):
serializer = CommentSerializer(data=request.data)
comments = Comment.objects.filter(movie_id=movie_pk)
users = []
for comment in comments:
if comment.user not in users:
users.append(comment.user)
if request.user not in users:
if serializer.is_valid(raise_exception=True):
serializer.save(user=request.user, movie_id=movie_pk)
return Response(serializer.data)
else:
        return Response({'Messages': 'You have already rated this movie.'})
# Recommend Movies
@api_view(['GET'])
def recommendMovies(request):
comments=Comment.objects.filter(user=request.user).order_by('-rating')
new_genres = {}
for comment in comments:
movie = get_object_or_404(Movie,id=comment.movie_id)
serializer=MovieSerializer(movie)
for genre in serializer.data['genres']:
if genre not in new_genres:
if comment.rating:
new_genres[genre] =comment.rating
else:
if comment.rating:
new_genres[genre] += comment.rating
new_genres=sorted(new_genres.items(), key=lambda x:x[1],reverse=True)
recommend={}
# Genres Ordered By User Ratings
for k,v in new_genres:
movies=Movie.objects.filter(genres=k)
# Movies in Genres
for i in movies:
total=0
# Summation of Comments Ratings
ratings=Comment.objects.filter(movie_id=i.id)
if ratings:
for k in ratings:
total+=ratings[0].rating
if total:
recommend[i.id]=total
recommend=sorted(recommend.items(),key=lambda x:x[1],reverse=True)
if len(recommend)<5:
movie=Movie.objects.order_by("?")[:5]
serializer=MovieSerializer(movie,many=True)
else:
result=[]
for i in recommend[:5]:
result.append(i[0])
movie=Movie.objects.filter(id__in=result)
serializer=MovieSerializer(movie,many=True)
return Response(serializer.data)
# Update and Delete Comments
@api_view(['PUT', 'DELETE'])
def comment_update_and_delete(request,movie_pk,comment_pk):
comment=get_object_or_404(Comment,pk=comment_pk)
if request.user == comment.user:
print('User OK')
if request.method=='PUT':
serializer=CommentSerializer(data=request.data,instance=comment)
print('Method PUT')
if serializer.is_valid(raise_exception=True):
print('is_valid OK')
serializer.save()
return Response(serializer.data)
else:
print('Method DELETE')
comment.delete()
            return Response({'message':'Deletion complete!'})
else:
        return Response({'message': 'Other users are not allowed.'})
# Popular Top3
@api_view(['GET'])
def many3(request):
many3 = Movie.objects.order_by('-popularity')[:3]
serializer = MovieSerializer(many3, many=True)
return Response(serializer.data)
# Top Rated Top3
@api_view(['GET'])
def top3(request):
top3 = Movie.objects.order_by('-vote_average')[:3]
serializer = MovieSerializer(top3, many=True)
return Response(serializer.data)
# Movie Search
@api_view(['GET'])
def search(request,movie_title):
movie=Movie.objects.filter(title__contains=movie_title)
if movie.exists():
serializer=MovieSerializer(movie,many=True)
return Response(serializer.data)
else:
return Response({'message':'No Result'})
@api_view(['POST'])
def wannawatch(request, movie_pk):
movie = get_object_or_404(Movie,id=movie_pk)
if movie.like_users.filter(pk=request.user.id).exists():
movie.like_users.remove(request.user)
else:
movie.like_users.add(request.user)
serializer=MovieSerializer(movie)
return Response(serializer.data)
    # return Response({'message': 'not needed'})
@api_view(['GET'])
def confirmWatch(request,movie_pk):
movie=get_object_or_404(Movie,id=movie_pk)
if request.user in movie.like_users.all():
return Response({'result':1})
else:
return Response({'result':0})
@api_view(['GET'])
# @permission_classes([IsAuthenticated])
def getwannawatch(request):
user=request.user
movies=user.like_movies.all()
serializer=MovieListSerializer(movies,many=True)
return Response(serializer.data)
# Find All Genres
@api_view(['GET'])
def findgenre(request):
genre=Genre.objects.all()
serializer=GenreSerializer(genre,many=True)
return Response(serializer.data)
# Find Specific Genres
@api_view(['GET'])
def getGenre(request,genre_id):
movies=Movie.objects.filter(genres=genre_id)
print(len(movies))
serializer=MovieSerializer(movies,many=True)
return Response(serializer.data) | [
"[email protected]"
] | |
7001df5b1c8067eb4650e631e6538486f1968571 | 846b11ccf549aba144c1824a24292a4850860ca7 | /3-EstruturaDeRepeticao/14.py | 6bdc78dafd567fe611d5f74f89ef5b66eb0168d1 | [] | no_license | paulocesarcsdev/ExerciciosPython | 6d1feff293e7efc4cd3fbc62eee0add93f76db99 | 25bfaa6dc5cb294242e478a2b253a8ca5d9c7078 | refs/heads/master | 2023-05-15T00:53:22.151884 | 2021-06-10T03:04:04 | 2021-06-10T03:04:04 | 337,847,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | '''
Write a program that asks for 10 integers, then computes and
displays how many of them are even and how many are odd.
'''
lista = []
contador = 0
while contador < 10:
    numero = int(input('Enter integer {} of 10: '.format(contador + 1)))
    lista.append(numero)
    contador += 1

pares = sum(1 for n in lista if n % 2 == 0)
impares = len(lista) - pares
print('Even numbers: {}'.format(pares))
print('Odd numbers: {}'.format(impares))
"[email protected]"
] | |
bbe95919f6a86c4969e52c8d4b83fac015417ecf | 21b632797ed6257b13574c341cdd14e6534728a9 | /ryu/tests/unit/app/test_ofctl_rest.py | e9c64d2ca45a8b9816adb0f84b00b81ef71a983e | [
"Apache-2.0"
] | permissive | MrCocoaCat/ryu | 0473f04e2a840e027e9002f8a6af81745eaf7094 | 9e9571991a73380099b7ba7c6f37e0e587080a6a | refs/heads/master | 2021-06-19T18:09:52.833590 | 2020-05-12T08:17:21 | 2020-05-12T08:17:21 | 163,072,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,519 | py | # Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import functools
import json
import logging
import os
import sys
import unittest
try:
import mock # Python 2
except ImportError:
from unittest import mock # Python 3
from nose.tools import eq_
from ryu.app import ofctl_rest
from ryu.app.wsgi import Request
from ryu.app.wsgi import WSGIApplication
from ryu.controller.dpset import DPSet
from ryu.ofproto import ofproto_protocol
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_4
from ryu.ofproto import ofproto_v1_5
from ryu.tests import test_lib
LOG = logging.getLogger(__name__)
class DummyDatapath(ofproto_protocol.ProtocolDesc):
def __init__(self, version):
super(DummyDatapath, self).__init__(version)
self.id = 1
_kw = {'port_no': 1, 'hw_addr': 'aa:bb:cc:dd:ee:ff',
'name': 's1-eth1', 'config': 1, 'state': 1}
# for OpenFlow 1.0
if version in [ofproto_v1_0.OFP_VERSION]:
_kw.update(
{'curr': 2112, 'advertised': 0, 'supported': 0, 'peer': 0})
port_info = self.ofproto_parser.OFPPhyPort(**_kw)
# for OpenFlow 1.2 or 1.3
elif version in [ofproto_v1_2.OFP_VERSION, ofproto_v1_3.OFP_VERSION]:
_kw.update(
{'curr': 2112, 'advertised': 0, 'supported': 0, 'peer': 0,
'curr_speed': 10000000, 'max_speed': 0})
port_info = self.ofproto_parser.OFPPort(**_kw)
# for OpenFlow 1.4+
else:
_kw.update({'properties': []})
port_info = self.ofproto_parser.OFPPort(**_kw)
self.ports = {1: port_info}
class Test_ofctl_rest(unittest.TestCase):
def _test(self, name, dp, method, path, body):
# print('processing %s ...' % name)
dpset = DPSet()
dpset._register(dp)
wsgi = WSGIApplication()
contexts = {
'dpset': dpset,
'wsgi': wsgi,
}
ofctl_rest.RestStatsApi(**contexts)
req = Request.blank(path)
req.body = json.dumps(body).encode('utf-8')
req.method = method
with mock.patch('ryu.lib.ofctl_utils.send_stats_request'),\
mock.patch('ryu.lib.ofctl_utils.send_msg'):
res = req.get_response(wsgi)
eq_(res.status, '200 OK')
def _add_tests():
_ofp_vers = {
'of10': ofproto_v1_0.OFP_VERSION,
'of12': ofproto_v1_2.OFP_VERSION,
'of13': ofproto_v1_3.OFP_VERSION,
'of14': ofproto_v1_4.OFP_VERSION,
'of15': ofproto_v1_5.OFP_VERSION,
}
this_dir = os.path.dirname(sys.modules[__name__].__file__)
ofctl_rest_json_dir = os.path.join(this_dir, 'ofctl_rest_json/')
for ofp_ver in _ofp_vers:
# read a json file
json_path = os.path.join(ofctl_rest_json_dir, ofp_ver + '.json')
if os.path.exists(json_path):
_test_cases = json.load(open(json_path))
else:
# print("Skip to load test cases for %s" % ofp_ver)
continue
# add test
for test in _test_cases:
method = test['method']
path = test['path']
body = test.get('body', {})
name = 'test_ofctl_rest_' + method + '_' + ofp_ver + '_' + path
# print('adding %s ...' % name)
f = functools.partial(
Test_ofctl_rest._test,
name=name,
dp=DummyDatapath(_ofp_vers[ofp_ver]),
method=test['method'],
path=test['path'],
body=body
)
test_lib.add_method(Test_ofctl_rest, name, f)
_add_tests()
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
0c87c480f5967e98463f781434699181e8465a21 | 429211c01057abcd51e5120d566c7daa0a8e2f33 | /1804/二/day9/gun.py | 708ce593b58389a50875961117c80df6abc145ac | [] | no_license | LDZ-RGZN/b1804 | 2788c922a6d1a6dc11267920a90336d1df93a453 | c57f8b7cf14686036cae3c30a30f07514622b5ca | refs/heads/master | 2021-07-19T12:54:07.031858 | 2018-10-12T02:48:39 | 2018-10-12T02:48:39 | 133,500,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | #!/usr/bin/env python
# coding=utf-8
# Lao Wang fires a gun
# Four classes: person (Ren), gun (Qiang), magazine (Danjia), bullet (Zidan)
class Ren:
    def __init__(self, name):
        self.name = name
        self.xue = 100
        self.qiang = None
    def __str__(self):
        return self.name + ' has ' + str(self.xue) + ' HP left'
    def anzidan(self, danjia, zidan):
        # load a bullet into the magazine
        danjia.baocunzidan(zidan)
    def andanjia(self, qiang, danjia):
        # attach the magazine to the gun
        qiang.lianjiedanjia(danjia)
    def naqiang(self, qiang):
        # pick up the gun
        self.qiang = qiang
    def kaiqiang(self, diren):
        # fire at the enemy
        self.qiang.she(diren)
    def diaoxue(self, shashangli):
        self.xue -= shashangli
# magazine
class Danjia:
    def __init__(self, rongliang):
        self.rongliang = rongliang
        self.ronglist = []
    def __str__(self):
        return "bullets: " + str(len(self.ronglist)) + "/" + str(self.rongliang)
    def baocunzidan(self, zidan):
        if len(self.ronglist) < self.rongliang:
            self.ronglist.append(zidan)
    def chuzidan(self):
        if len(self.ronglist) > 0:
            return self.ronglist.pop()
        else:
            return None
# bullet
class Zidan:
    def __init__(self, shashangli):
        self.shashangli = shashangli
    def shanghai(self, diren):
        diren.diaoxue(self.shashangli)
# gun
class Qiang:
    def __init__(self):
        self.danjia = None
    def __str__(self):
        if self.danjia:
            return "the gun has a magazine"
        else:
            return "the gun has no magazine"
    def lianjiedanjia(self, danjia):
        if not self.danjia:
            self.danjia = danjia
    def she(self, diren):
        zidan = self.danjia.chuzidan()
        if zidan:
            zidan.shanghai(diren)
        else:
            print("no bullets left, fired an empty shot..")
# create a person
laowang = Ren("Lao Wang")
# create a magazine
danjia = Danjia(20)
print(danjia)
i = 0
while i < 5:
    zidan = Zidan(5)
    laowang.anzidan(danjia, zidan)
    i += 1
print(danjia)
qiang = Qiang()
print(qiang)
laowang.andanjia(qiang, danjia)
print(qiang)
laowang.naqiang(qiang)  # pick up the gun before firing
diren = Ren("Enemy")
print(diren)
laowang.kaiqiang(diren)
print(diren)
print(danjia)
laowang.kaiqiang(diren)
print(diren)
print(danjia)
| [
"[email protected]"
] | |
ae9104c7eda0a9bab8afe8a967157bb5b7283c3e | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part000047.py | db44526b51ca3eb6cfa5a431845cc6192b31309e | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher103223(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher103223._instance is None:
CommutativeMatcher103223._instance = CommutativeMatcher103223()
return CommutativeMatcher103223._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 103222
return
yield
from collections import deque | [
"[email protected]"
] | |
68e493b2d7f735d95ac4aa98be268df361a49576 | 51b6d2fc53d5c632fcf01319842baebf13901e84 | /atcoder.jp/arc119/arc119_b/Main.py | 4f5348383754ef09ebd1273a7cb050def7fe502a | [] | no_license | mono-0812/procon | 35db3b2c21eff74fbd7b52db07f249380f6834ef | 68a4b53880a228a0164052b23d1326363efcbc20 | refs/heads/master | 2023-05-30T17:02:58.935074 | 2021-06-27T12:15:10 | 2021-06-27T12:15:10 | 345,896,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | import bisect,collections,copy,heapq,itertools,math,string,sys,queue,time
from typing import Counter
input = lambda: sys.stdin.readline().rstrip()
start_time=time.time()
from decimal import Decimal
def I(): return input()
def IS(): return input().split()
def II(): return int(input())
def IIS(): return map(int,input().split())
def LIIS(): return list(map(int,input().split()))
def debug(*args): print(*args) if len(args)>0 else print(False);return
def nt(): print(time.time()-start_time);return
def comb(n, r):return math.factorial(n) // (math.factorial(n - r) * math.factorial(r))
def combinations_with_replacement_count(n, r):
return comb(n + r - 1, r)
def tr():pass
def try_run():
try:tr()
except:pass
def make_divisors(n):
lower_divisors , upper_divisors = [], []
i = 1
while i*i <= n:
if n % i == 0:
lower_divisors.append(i)
if i != n // i:
upper_divisors.append(n//i)
i += 1
return lower_divisors + upper_divisors[::-1]
def prime_factorize(n):
a = []
while n % 2 == 0:
a.append(2)
n //= 2
f = 3
while f * f <= n:
if n % f == 0:
a.append(f)
n //= f
else:
f += 2
if n != 1:
a.append(n)
return a
ragen=range
INF=10**18
MOD=998244353
##############################################################################
n=II()
s=I()
t=I()
a=[]
for i in range(n):
if s[i]=="0":
a.append(i)
b=[]
for i in range(n):
if t[i]=="0":
b.append(i)
if len(a)!=len(b):
print(-1)
exit()
ans=0
for i in range(len(a)):
if a[i]!=b[i]:
ans+=1
print(ans) | [
"[email protected]"
] | |
c2ad53098881126cf3216483b39d65efcb8d5136 | 807a9f48de01fe9c2ae8200dbce0f590dcc6d0a7 | /jd/api/rest/__init__.py | a29e7abd846b521334424ed92761313fefa929b0 | [
"MIT"
] | permissive | onsunsl/DjangoBlog | 51cb01082253ebf6010a7d57ba6ce838f4809461 | 1df83f7ac0ef95433dd5a68cd8d00d37f6e8d6bd | refs/heads/master | 2020-04-02T13:49:17.860496 | 2020-03-22T13:50:01 | 2020-03-22T13:50:01 | 154,498,765 | 0 | 0 | MIT | 2018-10-24T12:38:39 | 2018-10-24T12:38:39 | null | UTF-8 | Python | false | false | 1,352 | py | # 商品类目查询
from .jdUnionOpenCategoryGoodsGet import jdUnionOpenCategoryGoodsGet
# Coupon redemption status query API
from .jdUnionOpenCouponQuery import jdUnionOpenCouponQuery
# Jingfen featured goods query API
from .jdUnionOpenGoodsJingfenQuery import jdUnionOpenGoodsJingfenQuery
# Goods-by-link query API
from .jdUnionOpenGoodsLinkQuery import jdUnionOpenGoodsLinkQueryt
# Get promoted goods information API
from .jdUnionOpenGoodsPromotiongoodsinfoQuery import jdUnionOpenGoodsPromotiongoodsinfoQuery
# Keyword goods query API
from .jdUnionOpenGoodsQuery import jdUnionOpenGoodsQuery
# Flash-sale goods query API
from .jdUnionOpenGoodsSeckillQuery import jdUnionOpenGoodsSeckillQuery
# Student-price goods query API
from .jdUnionOpenGoodsStupriceQuery import jdUnionOpenGoodsStupriceQuery
# Bonus order query API
from .jdUnionOpenOrderBonusQuery import jdUnionOpenOrderBonusQuery
# Query promotion orders and commission info; the data is updated as order
# status changes, and queries by order time, completion time, or status
# update time are supported.
from .jdUnionOpenOrderQuery import jdUnionOpenOrderQuery
# Get PID
from .jdUnionOpenUserPidGet import jdUnionOpenUserPidGet
# Get a promotion link via unionId
from .jdUnionOpenPromotionByunionidGet import jdUnionOpenPromotionByunionidGet
from .jdUnionOpenoPromotionCommonGet import jdUnionOpenPromotionCommonGet
| [
"[email protected]"
] | |
c8b3f7877605ee22e4fb7352e90bf9d0438a9629 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/aphotomanager/testcase/firstcases/testcase5_011.py | c948b113972a58ee17f3e78f54b86d42dff91b4c | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,009 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'de.k3b.android.androFotoFinder',
'appActivity' : 'de.k3b.android.androFotoFinder.FotoGalleryActivity',
'resetKeyboard' : True,
'androidCoverage' : 'de.k3b.android.androFotoFinder/de.k3b.android.androFotoFinder.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase011
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/home\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/home\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Delete\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
driver.press_keycode(82)
element = getElememtBack(driver, "new UiSelector().text(\"Cancel\")", "new UiSelector().className(\"android.widget.Button\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/image\").className(\"android.widget.ImageView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Mais opções\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Copiar\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"OK\")", "new UiSelector().className(\"android.widget.Button\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/cmd_any\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
swipe(driver, 0.5, 0.8, 0.5, 0.2)
swipe(driver, 0.5, 0.8, 0.5, 0.2)
element = getElememtBack(driver, "new UiSelector().text(\"OK\")", "new UiSelector().className(\"android.widget.Button\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/cmd_any\").className(\"android.widget.ImageButton\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"OK\")", "new UiSelector().className(\"android.widget.Button\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/cmd_any\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"acct \")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"sbin \")", "new UiSelector().className(\"android.widget.TextView\").instance(9)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"Delete\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"5_011\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'de.k3b.android.androFotoFinder'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"[email protected]"
] | |
67f834797b3b8e84a0342902ea5aaec610872b32 | 695803cf1ae81f7a8ad63faa80545c3c913cee02 | /Part1/week3/chapter11/exercise/employee_test_11.3/employee.py | 911dfb1dcd2147d814debf66f37c4af7a0925ca2 | [] | no_license | superstones/LearnPython | fa72a249a69323927da81887ce4b9f400552a1d0 | 5ea25f9b9922654d67c6b31475cdf02b9fe99c7e | refs/heads/master | 2023-06-24T10:40:18.639288 | 2021-07-22T08:34:11 | 2021-07-22T08:34:11 | 370,679,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | class Employee():
"""一个表示雇员的类"""
def __init__(self, first_name, last_name, salary):
"""初始化雇员"""
self.first_name = first_name.title()
self.last_name = last_name.title()
self.salary = salary
def give_raise(self, amount=5000):
self.salary += amount
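# A minimal usage sketch (hypothetical driver code, not part of the
# exercise file): build an employee, then apply the default and a custom raise.
if __name__ == '__main__':
    employee = Employee('jane', 'doe', 60000)
    employee.give_raise()         # default raise of 5000
    employee.give_raise(10000)    # custom raise
    print(employee.first_name, employee.last_name, employee.salary)  # Jane Doe 75000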
| [
"[email protected]"
] | |
181b8daf2945fae532bbd054c7f7d63f2582dbe1 | 95c3c587907ae38b11faacc4d2ebe1df8f5b3335 | /ASSGN-FlowControl-Aug16-Q3-Jyoti.py | 2933fbd0945cae6d8492dde2787e2ed8aee49a60 | [] | no_license | sandhyalethakula/Iprimed_16_python | d59cb47d2d2a63c04a658c8b302505efc8f24ff4 | c34202ca155819747a5c5ac4a8a5511d425f41a1 | refs/heads/main | 2023-08-16T11:40:11.005919 | 2021-09-29T13:22:32 | 2021-09-29T13:22:32 | 411,956,547 | 0 | 0 | null | 2021-09-30T06:57:13 | 2021-09-30T06:57:12 | null | UTF-8 | Python | false | false | 702 | py | '''
1.ask the user to enter the username
2.if the username is admin then ask the user to enter the password.
3.if the password is admin123 then greet the user and end the program.
4.if the password is not admin123 then display wrong password.
5.if the username is not admin then ask the user to enter the username again.'''
while True:
    UserName = input('enter the user name : ')  # ask for the user name
    if UserName == 'Jyoti':
        password = input('enter the password : ')  # ask for the password
        if password == 'jan369':  # check the password
            print('hello', UserName)
            break
        else:
            print('wrong password')
    else:
        print('unknown user name, please try again')
| [
"[email protected]"
] | |
5c5f3cdad8b7993f138f1ac54983ef6ff5b698e7 | 170026ff5b435027ce6e4eceea7fff5fd0b02973 | /glycan_profiling/output/report/glycan_lcms/render.py | 69e118fb688103df4b9b16b637b8f25d227bd523 | [
"Apache-2.0"
] | permissive | mstim/glycresoft | 78f64ae8ea2896b3c4f4c185e069387824e6c9f5 | 1d305c42c7e6cba60326d8246e4a485596a53513 | refs/heads/master | 2022-12-24T23:44:53.957079 | 2020-09-29T13:38:20 | 2020-09-29T13:38:20 | 276,471,357 | 0 | 0 | NOASSERTION | 2020-07-01T20:04:43 | 2020-07-01T20:04:42 | null | UTF-8 | Python | false | false | 6,222 | py | import os
from glycan_profiling import serialize
from glycan_profiling.plotting import summaries, figax, SmoothingChromatogramArtist
from glycan_profiling.plotting.chromatogram_artist import ChargeSeparatingSmoothingChromatogramArtist
from glycan_profiling.scoring.utils import logit
from glycan_profiling.chromatogram_tree import ChromatogramFilter
from jinja2 import Markup, Template
from glycan_profiling.output.report.base import (
svguri_plot, ReportCreatorBase)
def chromatogram_figures(chroma):
figures = []
plot = SmoothingChromatogramArtist(
[chroma], colorizer=lambda *a, **k: 'green', ax=figax()).draw(
label_function=lambda *a, **k: "", legend=False).ax
plot.set_title("Aggregated\nExtracted Ion Chromatogram", fontsize=24)
chroma_svg = svguri_plot(
plot, bbox_inches='tight', height=5, width=9, svg_width="100%")
figures.append(chroma_svg)
if len(chroma.mass_shifts) > 1:
mass_shifts = list(chroma.mass_shifts)
labels = {}
rest = chroma
for mass_shift in mass_shifts:
with_mass_shift, rest = rest.bisect_mass_shift(mass_shift)
labels[mass_shift] = with_mass_shift
mass_shift_plot = SmoothingChromatogramArtist(
labels.values(),
colorizer=lambda *a, **k: 'green', ax=figax()).draw(
label_function=lambda *a, **k: tuple(a[0].mass_shifts)[0].name,
legend=False).ax
mass_shift_plot.set_title(
"mass_shift-Separated\nExtracted Ion Chromatogram", fontsize=24)
mass_shift_separation = svguri_plot(
mass_shift_plot, bbox_inches='tight', height=5, width=9, svg_width="100%")
figures.append(mass_shift_separation)
if len(chroma.charge_states) > 1:
charge_separating_plot = ChargeSeparatingSmoothingChromatogramArtist(
[chroma], ax=figax()).draw(
label_function=lambda x, *a, **kw: str(
tuple(x.charge_states)[0]), legend=False).ax
charge_separating_plot.set_title(
"Charge-Separated\nExtracted Ion Chromatogram", fontsize=24)
charge_separation = svguri_plot(
charge_separating_plot, bbox_inches='tight', height=5, width=9,
svg_width="100%")
figures.append(charge_separation)
return figures
def chromatogram_link(chromatogram):
id_string = str(chromatogram.id)
return Markup("<a href=\"#detail-{0}\">{1}</a>").format(id_string, str(chromatogram.key))
class GlycanChromatogramReportCreator(ReportCreatorBase):
def __init__(self, database_path, analysis_id, stream=None, threshold=5):
super(GlycanChromatogramReportCreator, self).__init__(
database_path, analysis_id, stream)
self.set_template_loader(os.path.dirname(__file__))
self.threshold = threshold
self.glycan_chromatograms = ChromatogramFilter([])
self.unidentified_chromatograms = ChromatogramFilter([])
def glycan_link(self, key):
match = self.glycan_chromatograms.find_key(key)
if match is not None:
return chromatogram_link(match)
match = self.unidentified_chromatograms.find_key(key)
if match is not None:
return chromatogram_link(match)
return None
def prepare_environment(self):
super(GlycanChromatogramReportCreator, self).prepare_environment()
self.env.filters["logit"] = logit
self.env.filters['chromatogram_figures'] = chromatogram_figures
self.env.filters['glycan_link'] = self.glycan_link
def make_template_stream(self):
template_obj = self.env.get_template("overview.templ")
ads = serialize.AnalysisDeserializer(
self.database_connection._original_connection,
analysis_id=self.analysis_id)
self.glycan_chromatograms = gcs = ads.load_glycan_composition_chromatograms()
# und = ads.load_unidentified_chromatograms()
self.unidentified_chromatograms = und = ChromatogramFilter(
ads.query(serialize.UnidentifiedChromatogram).filter(
serialize.UnidentifiedChromatogram.analysis_id == self.analysis_id).all())
if len(gcs) == 0:
self.log("No glycan compositions were identified. Skipping report building")
templ = Template('''
<html>
<style>
body {
font-family: sans-serif;
}
</style>
<body>
<h3>No glycan compositions were identified</h3>
</body>
</html>
''')
return templ.stream()
summary_plot = summaries.GlycanChromatographySummaryGraphBuilder(
filter(lambda x: x.score > self.threshold, gcs + und))
lcms_plot, composition_abundance_plot = summary_plot.draw(min_score=5)
try:
lcms_plot.ax.legend_.set_visible(False)
except AttributeError:
# The legend may not have been created
pass
lcms_plot.ax.set_title("Glycan Composition\nLC-MS Aggregated EICs", fontsize=24)
fig = lcms_plot.ax.figure
fig.set_figwidth(fig.get_figwidth() * 2.)
fig.set_figheight(fig.get_figheight() * 2.)
composition_abundance_plot.ax.set_title("Glycan Composition\nTotal Abundances", fontsize=24)
composition_abundance_plot.ax.set_xlabel(
composition_abundance_plot.ax.get_xlabel(), fontsize=14)
def resolve_key(key):
match = gcs.find_key(key)
if match is None:
match = und.find_key(key)
return match
template_stream = (template_obj.stream(
analysis=ads.analysis, lcms_plot=svguri_plot(
lcms_plot.ax, bbox_inches='tight', patchless=True,
svg_width="100%"),
composition_abundance_plot=svguri_plot(
composition_abundance_plot.ax, bbox_inches='tight', patchless=True,
svg_width="100%"),
glycan_chromatograms=gcs,
unidentified_chromatograms=und,
resolve_key=resolve_key
))
return template_stream
| [
"[email protected]"
] | |
4027b8b586e455538c009e3f9d23e2020e8e842c | ac9b8a7b6a84a9abc357fc0904459008a90a55b4 | /Model/MarkMethodPixelOffset.py | 58aaaf225b8b2ce1dd41505fe21122bc362ba78c | [] | no_license | alex-ong/TFRevolution | a8a242e657cb9318d0ce8b6b013b2c2a4c911468 | f321a182a9b08b65c22b507bbd221c5e7c8c2d58 | refs/heads/master | 2021-04-27T08:00:16.931824 | 2018-09-13T14:50:40 | 2018-09-13T14:50:40 | 122,644,858 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,767 | py | from Model.PlayerData import PlayerData
def hexNoLeader(number):
return hex(number).replace("0x", "")
def ToHex(numbers):
return ('#' + hexNoLeader(numbers[0]).zfill(2) +
hexNoLeader(numbers[1]).zfill(2) +
hexNoLeader(numbers[2]).zfill(2))
#method for marking an image based purely on pixel offsets.
def markPlayerPreview(pixels, imgsize, startOffset, garbageOffset, gs):
markColor = (255, 255, 255)
garboColor = (0, 255, 0)
w, h = imgsize
for y in range(20):
yPix = round(y * gs + startOffset[1])
if yPix >= h:
break
for x in range(10):
xPix = round(x * gs + startOffset[0])
if xPix >= w:
break
pixels[xPix, yPix] = markColor
xPix = round(x * gs + startOffset[0] + garbageOffset)
if xPix >= w:
continue
pixels[xPix, yPix] = garboColor
def markImagePreview(fullImageMarker, image):
pixels = image.load()
startOffset = [20, 20] # magic number :(
garbageOffset = fullImageMarker.WindowSettings.garbageXOffset
    # startOffset is mutated in place below, so reusing the same argument
    # tuple marks player 2 at the shifted position on the second call
    PixelOffsetArgs = (pixels, image.size, startOffset, garbageOffset, fullImageMarker.WindowSettings.gridSize)
# mark player 1
markPlayerPreview(*PixelOffsetArgs)
startOffset[0] += fullImageMarker.WindowSettings.playerDistance
# mark player 2
markPlayerPreview(*PixelOffsetArgs)
# Section below is marking for output to external programs
def markImageOutput(imageMarker, image):
pixels = image.load()
garbageOffset = imageMarker.WindowSettings.garbageXOffset
startOffset = [20, 20] # magic number :(
# mark player 1
for player in imageMarker.data:
markPlayerOutput(imageMarker, player, pixels, image.size, garbageOffset, startOffset)
startOffset[0] += imageMarker.WindowSettings.playerDistance
def markPlayerOutput(imageMarker, player, pixels, imgsize, garbageOffset, startOffset):
player.resetGarbage()
gs = imageMarker.WindowSettings.gridSize
w, h = imgsize
y = 0
x = 0
for y in range(PlayerData.MATRIX_Y):
yPix = round(y * gs + startOffset[1])
if yPix >= h:
break
for x in range(PlayerData.MATRIX_X):
xPix = round(x * gs + startOffset[0])
if xPix >= w:
break
player.updateField(x, y, ToHex(pixels[xPix, yPix]))
# garbage detection
for y in range(PlayerData.MATRIX_Y - 1, -1, -1):
yPix = round(y * gs + startOffset[1])
xPix = round(x * gs + startOffset[0] + garbageOffset)
if xPix >= w or yPix >= h:
continue
player.updateGarbage(20 - y, pixels[xPix, yPix]) | [
"[email protected]"
] | |
e532c4a0865c915ecd68f08f632acc6da4255359 | ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0 | /02_algorithm/sw_expert_academy/code_problem/D5/1256.K번째 접미어/1256.py | 99e4ac4b61ae293181607c9a7012cb3c05371e32 | [] | no_license | wally-wally/TIL | 93fc1d0e3bc7d030341ed54155294c68c48b4c7d | 936783bc86f563646c0398c24e2fcaa707f0ed23 | refs/heads/master | 2023-04-28T08:59:48.235747 | 2023-04-12T12:06:52 | 2023-04-12T12:06:52 | 195,918,111 | 40 | 7 | null | 2020-09-29T16:20:46 | 2019-07-09T02:31:02 | Python | UTF-8 | Python | false | false | 647 | py | import sys
sys.stdin = open('input_1256.txt', 'r')
for test_case in range(int(input())):
alphabet = [[] for _ in range(26)]
K = int(input())
string_data = input()
for i in range(len(string_data)):
sub_data = string_data[i:]
if sub_data not in alphabet[ord(sub_data[0]) - 97]:
alphabet[ord(sub_data[0]) - 97].append(sub_data)
print(alphabet)
order = 0
for alpha in alphabet:
order += len(alpha)
if order >= K:
print('#{} {}'.format(test_case + 1, sorted(alpha, reverse=True)[order - K]))
break
else:
print('#{} none'.format(test_case + 1)) | [
"[email protected]"
] | |
7e2dbeb214ee92c378f430cf5cc247563ad2cd30 | 412a330e85ad845a79732277e291acb087d7caaa | /src/bs_basic/demo03.py | b238facde660a7b386d60d19f49a7941aed3e694 | [] | no_license | zhaopufeng/python_scrawler | d3f1284d0f25d6c09fb1c0c35f7c0c72e6c7602e | 04efb6ea7646ccc9281244468d892519c5a46d2d | refs/heads/master | 2022-01-08T13:38:32.020065 | 2019-05-29T00:02:38 | 2019-05-29T00:02:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # 读取和设置节点的属性
html = '''
<html>
<head><title>index</title></head>
<body attr='test xyz' class='style1 style2'>
<a rel='ok1 ok2 ok3' class='a1 a2' href='a.html'>first page</a>
<p>
<a href='b.html'>second page</a>
<p>
<a href='c.html'>third page</a>
<p>
<x k='123' attr1='hello' attr2='world'>hello</x>
</body>
</html>
'''
from bs4 import *
soup = BeautifulSoup(html,'lxml')
print(type(soup.body.attrs))
print('body.class','=',soup.body['class'])
print('body.attr','=',soup.body['attr'])
print('a.class','=',soup.a['class'])
print('x.attr1','=',soup.x['attr1'])
soup.body['class'] = ['x','y','z']
#print(soup.body)
#soup.body['class'] = 'xyz123 uio'
#print(soup.body)
soup.body['class'].append('ok')
print(soup.body)
#soup.body['ok'] = '443'
#del soup.body['class']
#print(soup.body)
print(soup.a['rel'])
# rel,rev,accept-charset,headers,accesskey
| [
"[email protected]"
] | |
70ed2e78c1bdba1fb9adfa58a67b26b2e1e983e8 | fc8137f6a4df69640657a0af5d7201de3c6eb261 | /accepted/LRU Cache.py | 18dd102997ba63e17d76e3d14388bc4477cb9995 | [] | no_license | hustlrr/leetcode | 68df72b49ee3bbb9f0755028e024cc9fea2c21aa | 56e33dff3918e371f14d6f7ef03f8951056cc273 | refs/heads/master | 2020-04-12T08:14:25.371761 | 2017-01-01T12:19:34 | 2017-01-01T12:19:34 | 77,119,341 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | # coding=utf-8
from collections import OrderedDict
class LRUCache(object):
def __init__(self, capacity):
"""
:type capacity: int
"""
self.capacity = capacity
self.cache = OrderedDict()
def get(self, key):
"""
:rtype: int
"""
        value = self.cache.pop(key, None)
if value is None:
return -1
self.cache[key] = value
return value
def set(self, key, value):
"""
:type key: int
:type value: int
:rtype: nothing
"""
        if self.cache.pop(key, None) is None and len(self.cache) == self.capacity:
            self.cache.popitem(last=False)  # evict the least recently used (oldest) entry
self.cache[key] = value
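# A minimal usage sketch (assumed driver code, not part of the original
# solution), showing the eviction order for a capacity-2 cache:
if __name__ == '__main__':
    cache = LRUCache(2)
    cache.set(1, 1)
    cache.set(2, 2)
    print(cache.get(1))  # 1; key 1 becomes the most recently used
    cache.set(3, 3)      # evicts key 2, the least recently used
    print(cache.get(2))  # -1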
| [
"[email protected]"
] | |
e19465b00dea86d31c9705849f57f03e7d300d22 | ba602dc67ad7bb50133aeb312f3c6c54627b3dec | /data/3919/WA_py/508306.py | 1daf789b3af32025e1f3a08fbb540b77f5386101 | [] | no_license | Dearyyyyy/TCG | 0d21d89275906157372d775f33309ce337e6bc95 | 7b80de16de2d3f5d95a7c4ed95d45a9e38882e67 | refs/heads/master | 2020-12-27T23:19:44.845918 | 2020-02-04T01:59:23 | 2020-02-04T01:59:23 | 238,101,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | # coding=utf-8
a,b,c=input().split()
d,e,f=input().split()
a=int(a)
b=int(b)
c=int(c)
d=int(d)
e=int(e)
f=int(f)
if a+b>c and a+c>b and c+b>a:
    if a==b==c:
        print("DB")    # equilateral; must be tested before the isosceles case
    elif a*a+b*b==c*c or a*a+c*c==b*b or b*b+c*c==a*a:
        print("ZJ")    # right triangle
    elif a==b or a==c or b==c:
        print("DY")    # isosceles
    else:
        print("PT")    # ordinary
else:
print("ERROR")
# classify the second triangle the same way
if d+e>f and d+f>e and e+f>d:
    if d==e==f:
        print("DB")
    elif d*d+e*e==f*f or d*d+f*f==e*e or f*f+e*e==d*d:
        print("ZJ")
    elif d==e or d==f or e==f:
        print("DY")
    else:
        print("PT")
else:
    print("ERROR")
"[email protected]"
] | |
1224931d246e52a66ddfe9428645e7a3d0e3fcee | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_4335.py | 454b8cb811aaf1aaa5ceae079d6b859bb77e86e8 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | # returning a single instance of a regex object's contents
line.replace('<a href="' + test_str + '">', '<a href="' + re_string + '">')
| [
"[email protected]"
] | |
1a3a8531445dfd974cb045babbdf1ceea1b12e4d | 084177c601eeb7ce99a343b94cbad8eb15cb7f95 | /flask/jinja2/sample/sample.py | f3f84ff17d340f59d35709ea8617ea5f1d481738 | [] | no_license | CaesarLinsa/flask_learn_note | bb82360c1ca15a48ba136c460b6b6159a8ff4034 | 260ae68ed6494f995a75f21f16fc493d10031a2a | refs/heads/master | 2022-09-28T03:38:23.626341 | 2020-02-07T08:42:52 | 2020-02-07T08:42:52 | 237,619,223 | 0 | 0 | null | 2022-09-16T18:17:46 | 2020-02-01T13:30:34 | Python | UTF-8 | Python | false | false | 1,176 | py | # -*- coding: utf-8 -*-
from flask import Flask, render_template, request
from flask_script import Manager
from livereload import Server
from flask_bootstrap import Bootstrap
from flask_nav import Nav
from flask_nav.elements import *
app = Flask(__name__)
manager = Manager(app)
Bootstrap(app)
nav = Nav()
nav.register_element('top', Navbar('Flask Basics',
                                   View('Home', 'index'),
                                   View('Login', 'login')
                                   ))
nav.init_app(app)
@app.route("/")
def index():
return render_template("index.html", title="hello world")
@app.route('/login', methods=["GET", "POST"])
def login():
from forms import LoginForm
form = LoginForm()
if request.method == "POST":
form_obj = LoginForm(request.form)
if form_obj.validate():
ers = request.form.to_dict()
print(ers)
print(form_obj.data)
return "登录成功"
return render_template("login.html", form=form)
if __name__ == '__main__':
live_server = Server(app.wsgi_app)
live_server.watch('**/*.*')
live_server.serve(open_url_delay=True)
| [
"[email protected]"
] | |
c19c4b35c726c1dfed536061da95efa490d4e473 | a110cda0dd755a0aeeccaa349de5b7c8f836f7d9 | /005_PrintingPDFs/renameFiles.py | 87db8921f363b65d6f0c00339428f095356404ef | [] | no_license | ksobon/archi-lab | 26d93ef07e4f571e73a78bc40299edd3dc84c2a6 | 9a8a57eccca899ace78a998dc7698ff7754fae6b | refs/heads/master | 2021-01-15T09:37:06.045588 | 2020-06-03T15:55:46 | 2020-06-03T15:55:46 | 26,090,112 | 6 | 5 | null | 2020-02-09T04:24:41 | 2014-11-02T19:02:28 | Python | UTF-8 | Python | false | false | 1,422 | py | #Copyright(c) 2015, Konrad Sobon
# @arch_laboratory, http://archi-lab.net
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
# Import DocumentManager and TransactionManager
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
uiapp = DocumentManager.Instance.CurrentUIApplication
app = uiapp.Application
# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
from Autodesk.Revit import *
import System
import sys
pyt_path = r'C:\Program Files (x86)\IronPython 2.7\Lib'
sys.path.append(pyt_path)
import os
filePath = IN[0]
identifiers = IN[1]
newNames = IN[2]
RunIt = IN[3]
files = os.listdir(filePath)
if RunIt:
message = "Success"
for file in files:
currentFileName = filePath + "\\" + file
for i, j in zip(identifiers, newNames):
newFileName = filePath + "\\" + j
if i in file and currentFileName != newFileName:
try:
os.rename(currentFileName, newFileName)
except:
message = "Your intended file name is not a compatible file name. Make sure that you are not strings like..."
pass
else:
message = "Please set RunIt to True."
#docName = uiapp.ActiveUIDocument.Document.Title
#Assign your output to the OUT variable
OUT = message
| [
"[email protected]"
] | |
db06647a6981e938a79c48c2fd34c1b303e5446e | 80453e94cfcda83b5796f17f5e3a5a8616c50288 | /network_ui_dev/migrations/0001_initial.py | d1e1eca0100a14c9a511a5cae05346a0d751324e | [
"Apache-2.0"
] | permissive | benthomasson/network-ui | eaca0ab0ed8a5ab3ba9d2e2aab81b1e38248b649 | 8c3e114d331a57848acb4004ccf090148fd36e86 | refs/heads/react | 2023-01-13T07:56:38.914187 | 2019-06-19T14:58:56 | 2019-06-19T14:58:56 | 145,854,921 | 1 | 1 | Apache-2.0 | 2023-01-06T01:08:15 | 2018-08-23T13:07:43 | HTML | UTF-8 | Python | false | false | 12,004 | py | # Generated by Django 2.0.8 on 2018-08-23 16:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('client_id', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='CodeUnderTest',
fields=[
('code_under_test_id', models.AutoField(primary_key=True, serialize=False)),
('version_x', models.IntegerField()),
('version_y', models.IntegerField()),
('version_z', models.IntegerField()),
('commits_since', models.IntegerField()),
('commit_hash', models.CharField(blank=True, max_length=40)),
],
),
migrations.CreateModel(
name='Coverage',
fields=[
('coverage_id', models.AutoField(primary_key=True, serialize=False)),
('coverage_data', models.TextField()),
],
),
migrations.CreateModel(
name='Device',
fields=[
('device_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=200)),
('x', models.IntegerField()),
('y', models.IntegerField()),
('id', models.IntegerField()),
('device_type', models.CharField(blank=True, max_length=200)),
('interface_id_seq', models.IntegerField(default=0)),
('process_id_seq', models.IntegerField(default=0)),
('host_id', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='EventTrace',
fields=[
('event_trace_id', models.AutoField(primary_key=True, serialize=False)),
('trace_session_id', models.IntegerField(default=0)),
('event_data', models.TextField()),
('message_id', models.IntegerField()),
('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Client')),
],
),
migrations.CreateModel(
name='FSMTrace',
fields=[
('fsm_trace_id', models.AutoField(primary_key=True, serialize=False)),
('fsm_name', models.CharField(blank=True, max_length=200)),
('from_state', models.CharField(blank=True, max_length=200)),
('to_state', models.CharField(blank=True, max_length=200)),
('message_type', models.CharField(blank=True, max_length=200)),
('trace_session_id', models.IntegerField(default=0)),
('order', models.IntegerField(default=0)),
('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Client')),
],
),
migrations.CreateModel(
name='Group',
fields=[
('group_id', models.AutoField(primary_key=True, serialize=False)),
('id', models.IntegerField()),
('name', models.CharField(blank=True, max_length=200)),
('x1', models.IntegerField()),
('y1', models.IntegerField()),
('x2', models.IntegerField()),
('y2', models.IntegerField()),
('group_type', models.CharField(blank=True, max_length=200)),
('inventory_group_id', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='GroupDevice',
fields=[
('group_device_id', models.AutoField(primary_key=True, serialize=False)),
('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Device')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Group')),
],
),
migrations.CreateModel(
name='Interface',
fields=[
('interface_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=200)),
('id', models.IntegerField()),
('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Device')),
],
),
migrations.CreateModel(
name='Link',
fields=[
('link_id', models.AutoField(primary_key=True, serialize=False)),
('id', models.IntegerField()),
('name', models.CharField(blank=True, max_length=200)),
('from_device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_link', to='network_ui_dev.Device')),
('from_interface', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_link', to='network_ui_dev.Interface')),
('to_device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_link', to='network_ui_dev.Device')),
('to_interface', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_link', to='network_ui_dev.Interface')),
],
),
migrations.CreateModel(
name='MessageType',
fields=[
('message_type_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=200)),
],
),
migrations.CreateModel(
name='Process',
fields=[
('process_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=200)),
('process_type', models.CharField(blank=True, max_length=200)),
('id', models.IntegerField(default=0)),
('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Device')),
],
),
migrations.CreateModel(
name='Result',
fields=[
('result_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=20)),
],
),
migrations.CreateModel(
name='Stream',
fields=[
('stream_id', models.AutoField(primary_key=True, serialize=False)),
('label', models.CharField(blank=True, max_length=200)),
('id', models.IntegerField(default=0)),
('from_device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_stream', to='network_ui_dev.Device')),
('to_device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_stream', to='network_ui_dev.Device')),
],
),
migrations.CreateModel(
name='TestCase',
fields=[
('test_case_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=200)),
('test_case_data', models.TextField()),
],
),
migrations.CreateModel(
name='TestResult',
fields=[
('test_result_id', models.AutoField(primary_key=True, serialize=False)),
('time', models.DateTimeField()),
('id', models.IntegerField(default=0)),
('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Client')),
('code_under_test', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.CodeUnderTest')),
('result', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Result')),
('test_case', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.TestCase')),
],
),
migrations.CreateModel(
name='Toolbox',
fields=[
('toolbox_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=200)),
],
),
migrations.CreateModel(
name='ToolboxItem',
fields=[
('toolbox_item_id', models.AutoField(primary_key=True, serialize=False)),
('data', models.TextField()),
('toolbox', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Toolbox')),
],
),
migrations.CreateModel(
name='Topology',
fields=[
('topology_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=200)),
('scale', models.FloatField()),
('panX', models.FloatField()),
('panY', models.FloatField()),
('device_id_seq', models.IntegerField(default=0)),
('link_id_seq', models.IntegerField(default=0)),
('group_id_seq', models.IntegerField(default=0)),
('stream_id_seq', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='TopologyHistory',
fields=[
('topology_history_id', models.AutoField(primary_key=True, serialize=False)),
('message_id', models.IntegerField()),
('message_data', models.TextField()),
('undone', models.BooleanField(default=False)),
('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Client')),
('message_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.MessageType')),
('topology', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Topology')),
],
),
migrations.CreateModel(
name='TopologyInventory',
fields=[
('topology_inventory_id', models.AutoField(primary_key=True, serialize=False)),
('inventory_id', models.IntegerField()),
('topology', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Topology')),
],
),
migrations.CreateModel(
name='TopologySnapshot',
fields=[
('topology_snapshot_id', models.AutoField(primary_key=True, serialize=False)),
('topology_id', models.IntegerField()),
('trace_session_id', models.IntegerField()),
('snapshot_data', models.TextField()),
('order', models.IntegerField()),
('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Client')),
],
),
migrations.AddField(
model_name='group',
name='topology',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Topology'),
),
migrations.AddField(
model_name='device',
name='topology',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.Topology'),
),
migrations.AddField(
model_name='coverage',
name='test_result',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network_ui_dev.TestResult'),
),
]
| [
"[email protected]"
] | |
e9bf9486a87bbe6c79f4f9baac13b0737938295b | 9a553930cf5fc5c9a39cbf2373f9a16b6a3461f7 | /example03/blog/views.py | 86dbf1b2bd3da2459a45de85d94300affe07e4de | [] | no_license | lee-seul/django_example | 8ad45ad277d2e69b0108b7609be7fd37de6540f2 | bf2736e42f0a03e603f5a34eab89bae0ed43d0a3 | refs/heads/master | 2021-01-12T06:30:18.723640 | 2017-05-17T09:55:43 | 2017-05-17T09:55:43 | 77,370,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | from django.views.generic import ListView, DetailView
from blog.models import Post
class PostLV(ListView):
model = Post
template_name = 'blog/post_all.html'
context_object_name = 'posts'
paginate_by = 2
class PostDV(DetailView):
model = Post
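# A minimal URLconf sketch (assumed, not part of this file) showing how
# these class-based views would typically be wired up in blog/urls.py:
#
#   from django.urls import path
#   from blog.views import PostLV, PostDV
#
#   urlpatterns = [
#       path('', PostLV.as_view(), name='index'),
#       path('post/<int:pk>/', PostDV.as_view(), name='post_detail'),
#   ]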
| [
"[email protected]"
] | |
c546d06a29da7625de0d5f87dedab6c1c3b88244 | 90f52d0348aa0f82dc1f9013faeb7041c8f04cf8 | /wxPython3.0 Docs and Demos/demo/DelayedResult.py | a8342a9a2c2f81a93a8e289ffba1eaa8a9ed8cce | [] | no_license | resource-jason-org/python-wxPythonTool | 93a25ad93c768ca8b69ba783543cddf7deaf396b | fab6ec3155e6c1ae08ea30a23310006a32d08c36 | refs/heads/master | 2021-06-15T10:58:35.924543 | 2017-04-14T03:39:27 | 2017-04-14T03:39:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,014 | py | """
This demonstrates a simple use of delayedresult: get/compute
something that takes a long time, without hanging the GUI while this
is taking place.
The top button runs a small GUI that uses wx.lib.delayedresult.startWorker
to wrap a long-running function into a separate thread. Just click
Get, and move the slider, and click Get and Abort a few times, and
observe that GUI responds. The key functions to look for in the code
are startWorker() and the _resultConsumer()/_resultProducer() pair.
The second button runs the same GUI, but without delayedresult. Click
Get: now the get/compute is taking place in main thread, so the GUI
does not respond to user actions until worker function returns, it's
not even possible to Abort.
"""
import wx
import wx.lib.delayedresult as delayedresult
class FrameSimpleDelayedBase(wx.Frame):
def __init__(self, *args, **kwds):
wx.Frame.__init__(self, *args, **kwds)
pnl = wx.Panel(self)
self.checkboxUseDelayed = wx.CheckBox(pnl, -1, "Using delayedresult")
self.buttonGet = wx.Button(pnl, -1, "Get")
self.buttonAbort = wx.Button(pnl, -1, "Abort")
self.slider = wx.Slider(pnl, -1, 0, 0, 10, size=(100,-1),
style=wx.SL_HORIZONTAL|wx.SL_AUTOTICKS)
self.textCtrlResult = wx.TextCtrl(pnl, -1, "", style=wx.TE_READONLY)
self.checkboxUseDelayed.SetValue(1)
self.checkboxUseDelayed.Enable(False)
self.buttonAbort.Enable(False)
vsizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
vsizer.Add(self.checkboxUseDelayed, 0, wx.ALL, 10)
hsizer.Add(self.buttonGet, 0, wx.ALL, 5)
hsizer.Add(self.buttonAbort, 0, wx.ALL, 5)
hsizer.Add(self.slider, 0, wx.ALL, 5)
hsizer.Add(self.textCtrlResult, 0, wx.ALL, 5)
vsizer.Add(hsizer, 0, wx.ALL, 5)
pnl.SetSizer(vsizer)
vsizer.SetSizeHints(self)
self.Bind(wx.EVT_BUTTON, self.handleGet, self.buttonGet)
self.Bind(wx.EVT_BUTTON, self.handleAbort, self.buttonAbort)
class FrameSimpleDelayed(FrameSimpleDelayedBase):
"""This demos simplistic use of delayedresult module."""
def __init__(self, *args, **kwargs):
FrameSimpleDelayedBase.__init__(self, *args, **kwargs)
self.jobID = 0
self.abortEvent = delayedresult.AbortEvent()
self.Bind(wx.EVT_CLOSE, self.handleClose)
def setLog(self, log):
self.log = log
def handleClose(self, event):
"""Only needed because in demo, closing the window does not kill the
app, so worker thread continues and sends result to dead frame; normally
your app would exit so this would not happen."""
if self.buttonAbort.IsEnabled():
self.log( "Exiting: Aborting job %s" % self.jobID )
self.abortEvent.set()
self.Destroy()
def handleGet(self, event):
"""Compute result in separate thread, doesn't affect GUI response."""
self.buttonGet.Enable(False)
self.buttonAbort.Enable(True)
self.abortEvent.clear()
self.jobID += 1
self.log( "Starting job %s in producer thread: GUI remains responsive"
% self.jobID )
delayedresult.startWorker(self._resultConsumer, self._resultProducer,
wargs=(self.jobID,self.abortEvent), jobID=self.jobID)
def _resultProducer(self, jobID, abortEvent):
"""Pretend to be a complex worker function or something that takes
long time to run due to network access etc. GUI will freeze if this
method is not called in separate thread."""
import time
count = 0
while not abortEvent() and count < 50:
time.sleep(0.1)
count += 1
return jobID
def handleAbort(self, event):
"""Abort the result computation."""
self.log( "Aborting result for job %s" % self.jobID )
self.buttonGet.Enable(True)
self.buttonAbort.Enable(False)
self.abortEvent.set()
def _resultConsumer(self, delayedResult):
jobID = delayedResult.getJobID()
assert jobID == self.jobID
try:
result = delayedResult.get()
except Exception, exc:
self.log( "Result for job %s raised exception: %s" % (jobID, exc) )
return
# output result
self.log( "Got result for job %s: %s" % (jobID, result) )
self.textCtrlResult.SetValue(str(result))
# get ready for next job:
self.buttonGet.Enable(True)
self.buttonAbort.Enable(False)
class FrameSimpleDirect(FrameSimpleDelayedBase):
"""This does not use delayedresult so the GUI will freeze while
the GET is taking place."""
def __init__(self, *args, **kwargs):
self.jobID = 1
FrameSimpleDelayedBase.__init__(self, *args, **kwargs)
self.checkboxUseDelayed.SetValue(False)
def setLog(self, log):
self.log = log
def handleGet(self, event):
"""Use delayedresult, this will compute result in separate
thread, and will affect GUI response because a thread is not
used."""
self.buttonGet.Enable(False)
self.buttonAbort.Enable(True)
self.log( "Doing job %s without delayedresult (same as GUI thread): GUI hangs (for a while)" % self.jobID )
result = self._resultProducer(self.jobID)
self._resultConsumer( result )
def _resultProducer(self, jobID):
"""Pretend to be a complex worker function or something that takes
long time to run due to network access etc. GUI will freeze if this
method is not called in separate thread."""
import time
time.sleep(5)
return jobID
def handleAbort(self, event):
"""can never be called"""
pass
def _resultConsumer(self, result):
# output result
self.log( "Got result for job %s: %s" % (self.jobID, result) )
self.textCtrlResult.SetValue(str(result))
# get ready for next job:
self.buttonGet.Enable(True)
self.buttonAbort.Enable(False)
self.jobID += 1
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
vsizer = wx.BoxSizer(wx.VERTICAL)
b = wx.Button(self, -1, "Long-running function in separate thread")
vsizer.Add(b, 0, wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.OnButton1, b)
b = wx.Button(self, -1, "Long-running function in GUI thread")
vsizer.Add(b, 0, wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.OnButton2, b)
bdr = wx.BoxSizer()
bdr.Add(vsizer, 0, wx.ALL, 50)
self.SetSizer(bdr)
self.Layout()
def OnButton1(self, evt):
frame = FrameSimpleDelayed(self, title="Long-running function in separate thread")
frame.setLog(self.log.WriteText)
frame.Show()
def OnButton2(self, evt):
frame = FrameSimpleDirect(self, title="Long-running function in GUI thread")
frame.setLog(self.log.WriteText)
frame.Show()
#---------------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#---------------------------------------------------------------------------
overview = __doc__
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| [
"[email protected]"
] | |
cb5613bfd8d6ec14e6bc38d726a3a675e9cbae55 | b563023f73eec953afc43396bf1c26519d69a236 | /web/components/commons/view_mixins.py | 2aa435d87a2e250f12544365c4b1cb1dcd4b18ea | [
"MIT"
] | permissive | pkdevbox/goodtables-web | bf3b18a9ab6e0394320ec9dfa6077e8e47d7a0c8 | 5fe41db5361b54e0a553dbea4cbb73fd55b6418c | refs/heads/master | 2021-01-12T19:25:07.958490 | 2015-08-28T08:31:43 | 2015-08-28T08:31:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from werkzeug.datastructures import FileStorage
from flask import current_app as app
from goodtables.pipeline import Pipeline
from . import utilities
class RunPipelineMixin(object):
def run_pipeline(self, with_permalinks=False):
payload = utilities.clean_payload(utilities.get_runargs())
data = {}
data['sources'] = utilities.get_data_urls()
data['success'] = False
data['report'] = app.config['GOODTABLES_PIPELINE_BUILD_ERROR_RESPONSE']
if with_permalinks:
data['permalinks'] = utilities.get_report_permalinks(payload)
if isinstance(payload['data'], FileStorage):
payload['data'] = payload['data'].stream
# build and run a validation pipeline
try:
pipeline = utilities.get_pipeline(payload)
except Exception as e:
pipeline = None
data['report']['error_title'] = e.__class__.__name__
            data['report']['error_message'] = getattr(e, 'msg', str(e))  # not every exception carries .msg
if isinstance(pipeline, Pipeline):
success, report = pipeline.run()
data.update({'success': success, 'report': report.generate()})
return data
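# A minimal usage sketch (assumed, not part of this module): a view class
# would mix this in and return the dict from its request handler, e.g.
#
#   class ValidateView(MethodView, RunPipelineMixin):
#       def post(self):
#           return jsonify(self.run_pipeline(with_permalinks=True))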
| [
"[email protected]"
] | |
02fb262b75b7de58af4b869892556e16c808e01e | fdcd1058df2e42ce9a6c7a38b76757997f53cb2a | /muted/system/cmd_say.py | 31d6ec251e2b21423c50c77712481ca4d2855a50 | [
"MIT"
] | permissive | LZJ861124/mute | afb12d516ae1a4106079b51999dd0aa484618b07 | f278d9cd2e9c1a4551d5ecdffde919d22ab2f6bb | refs/heads/master | 2020-05-26T09:41:57.238217 | 2019-05-23T08:19:16 | 2019-05-23T08:19:16 | 188,191,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py |
from __future__ import annotations
from typing import Type
from component.name import Name
from component.role import Role
from event.event import Event
from message.message import Message
from system.channel import Channel
from logcat.logcat import LogCat
class CmdSay:
@LogCat.log_func
def __init__(self, servant: Type[Handler]):
servant.on(Event.CMD_SAY, self._on_cmd_say)
@LogCat.log_func
def _on_cmd_say(
self, e: Event, entity: str = '', args: str = ''
) -> None:
if not args:
            text = 'What do you want to say?'
Channel.toRole(entity, Message.TEXT, text)
else:
            text = f'{Name.instance(entity).text} says: {" ".join(args)}'
role = Role.instance(entity)
Channel.toRoom(role.room, Message.TEXT, text)
# cmd_say.py
| [
"[email protected]"
] | |
63e71d8483e8d40290c82c2af36178fd719de51a | cfcd117378664e4bea080b3c1011a25a575b3d51 | /hawc/apps/materialized/apps.py | c804446ee46e38f9bba133e2af6ddd04d9bf72c5 | [
"MIT"
] | permissive | shapiromatron/hawc | 9d3a625da54d336334da4576bd5dac6915c18d4f | 51177c6fb9354cd028f7099fc10d83b1051fd50d | refs/heads/main | 2023-08-03T13:04:23.836537 | 2023-08-01T18:39:16 | 2023-08-01T18:39:16 | 25,273,569 | 25 | 15 | NOASSERTION | 2023-09-14T17:03:48 | 2014-10-15T21:06:33 | Python | UTF-8 | Python | false | false | 214 | py | from django.apps import AppConfig
class MaterializedViewsConfig(AppConfig):
name = "hawc.apps.materialized"
verbose_name = "Materialized Views"
def ready(self):
from . import signals # noqa
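# Importing the signals module inside ready() is the standard Django idiom
# for registering signal receivers once the app registry is fully loaded;
# the import exists only for its side effects, hence the noqa marker.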
| [
"[email protected]"
] | |
2e790eddb7e4e03c1d35f0ce13e45eca4d59f04c | 99697559d046cdd04dd9068bd518e4da4177aaa2 | /Empty/M797_All_Paths_From_Source_to_Target.py | 1cf154c64c0e3ba9c99528c0ca04d71a5727b4b7 | [] | no_license | Azurisky/Leetcode | 3e3621ef15f2774cfdfac8c3018e2e4701760c3b | 8fa215fb0d5b2e8f6a863756c874d0bdb2cffa04 | refs/heads/master | 2020-03-18T22:46:35.780864 | 2018-10-07T05:45:30 | 2018-10-07T05:45:30 | 135,364,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | class Solution:
def allPathsSourceTarget(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[List[int]]
""" | [
"[email protected]"
] | |
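# --- editorial note ---------------------------------------------------------
# The file above is an intentionally empty template (it lives under "Empty/").
# A reference DFS solution for LeetCode 797 is sketched below; the class name
# is an editorial assumption, kept separate so the template stays untouched.
class SolutionReference:
    def allPathsSourceTarget(self, graph):
        res = []
        def dfs(node, path):
            # collect every path from node 0 to the last node of the DAG
            if node == len(graph) - 1:
                res.append(path)
                return
            for nxt in graph[node]:
                dfs(nxt, path + [nxt])
        dfs(0, [0])
        return res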
048e3950baa1ef3fb53501de60e5d22aae233701 | 01c00a769156b010012ce6150c737be43a34a5a7 | /RegressionOneApp/serializers.py | 4176bbb946783bb0d02bdcd4b08ffaeedcef27e5 | [] | no_license | chelseatroy/RegressionOne | 87844893f91aebe4488f23db9121498e6560ee53 | 85d2f2ff5e62f3b291adb575fb980e741223a5ee | refs/heads/master | 2021-04-09T16:09:15.153337 | 2016-07-16T17:06:52 | 2016-07-16T17:06:52 | 62,819,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | from rest_framework import serializers
from RegressionOneApp.models import Thingamabob
class ThingamabobSerializer(serializers.ModelSerializer):
class Meta:
model = Thingamabob
fields = ('description', 'done', 'updated') | [
"[email protected]"
] | |
e67c12e5d99b0eab27528388d44d661202817111 | 15d710d6de2033f95c9970f14c22aa0e4bab9647 | /supervised/preprocessing/preprocessing_utils.py | 2ee4c3efdb18ce473b28442df6650c345194372a | [
"MIT"
] | permissive | mmejdoubi/mljar-supervised | 3c9ea1c706e2b279502d57f68ba917c8c9de4890 | 59e7b5b1d005af98681335dbd323bb8b24a32075 | refs/heads/master | 2020-04-21T19:18:18.654469 | 2018-12-19T12:53:03 | 2018-12-19T12:53:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | import numpy as np
class PreprocessingUtilsException(Exception):
pass
class PreprocessingUtils(object):
CATEGORICAL = "categorical"
CONTINOUS = "continous"
DISCRETE = "discrete"
@staticmethod
def get_type(x):
if len(x.shape) > 1:
if x.shape[1] != 1:
raise PreprocessingUtilsException(
"Please select one column to get its type"
)
col_type = str(x.dtype)
data_type = PreprocessingUtils.CATEGORICAL
if col_type.startswith("float"):
data_type = PreprocessingUtils.CONTINOUS
elif col_type.startswith("int"):
data_type = PreprocessingUtils.DISCRETE
return data_type
@staticmethod
def get_most_frequent(x):
a = x.value_counts()
first = sorted(dict(a).items(), key=lambda x: -x[1])[0]
return first[0]
@staticmethod
def get_min(x):
return np.amin(np.nanmin(x))
@staticmethod
def get_mean(x):
return np.nanmean(x)
@staticmethod
def get_median(x):
return np.nanmedian(x)
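# --- editorial sketch (not part of mljar-supervised) ------------------------
# Minimal usage example for PreprocessingUtils above. It assumes pandas
# Series inputs (the value_counts() call in get_most_frequent implies
# pandas); the sample data is illustrative only.
import pandas as pd

continuous = pd.Series([0.1, 0.5, None])
discrete = pd.Series([1, 2, 2, 3])
print(PreprocessingUtils.get_type(continuous))         # 'continous' (upstream spelling)
print(PreprocessingUtils.get_type(discrete))           # 'discrete'
print(PreprocessingUtils.get_most_frequent(discrete))  # 2
print(PreprocessingUtils.get_mean(continuous))         # 0.3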
| [
"[email protected]"
] | |
c044ecd51625ba4130756577fc837d5e73f2adfd | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/ospf/invalidstatshist1h.py | e5836c6bdfa0a681b104ac17c3192e27038611f3 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,917 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class InvalidStatsHist1h(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.ospf.InvalidStatsHist1h", "Ospf Invalid Packets")
counter = CounterMeta("invalidDestAddrPktsRcvd", CounterCategory.COUNTER, "packets", "Invalid Destination Addr Packets Received")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "invalidDestAddrPktsRcvdCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "invalidDestAddrPktsRcvdPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "invalidDestAddrPktsRcvdMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "invalidDestAddrPktsRcvdMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "invalidDestAddrPktsRcvdAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "invalidDestAddrPktsRcvdSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "invalidDestAddrPktsRcvdThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "invalidDestAddrPktsRcvdTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "invalidDestAddrPktsRcvdRate"
meta._counters.append(counter)
counter = CounterMeta("invalidSrcAddrPktsRcvd", CounterCategory.COUNTER, "packets", "Invalid Source Addr Packets Received")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "invalidSrcAddrPktsRcvdCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "invalidSrcAddrPktsRcvdPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "invalidSrcAddrPktsRcvdMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "invalidSrcAddrPktsRcvdMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "invalidSrcAddrPktsRcvdAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "invalidSrcAddrPktsRcvdSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "invalidSrcAddrPktsRcvdThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "invalidSrcAddrPktsRcvdTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "invalidSrcAddrPktsRcvdRate"
meta._counters.append(counter)
counter = CounterMeta("wrongAreaPktsRcvd", CounterCategory.COUNTER, "packets", "Wrong Area Packets Received")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "wrongAreaPktsRcvdCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "wrongAreaPktsRcvdPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "wrongAreaPktsRcvdMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "wrongAreaPktsRcvdMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "wrongAreaPktsRcvdAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "wrongAreaPktsRcvdSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "wrongAreaPktsRcvdThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "wrongAreaPktsRcvdTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "wrongAreaPktsRcvdRate"
meta._counters.append(counter)
meta.moClassName = "ospfInvalidStatsHist1h"
meta.rnFormat = "HDospfInvalidStats1h-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Ospf Invalid Packets stats in 1 hour"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.ospf.IfStats")
meta.superClasses.add("cobra.model.ospf.InvalidStatsHist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.rnPrefixes = [
('HDospfInvalidStats1h-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 47795, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "invalidDestAddrPktsRcvdAvg", "invalidDestAddrPktsRcvdAvg", 48585, PropCategory.IMPLICIT_AVG)
prop.label = "Invalid Destination Addr Packets Received average value"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidDestAddrPktsRcvdAvg", prop)
prop = PropMeta("str", "invalidDestAddrPktsRcvdCum", "invalidDestAddrPktsRcvdCum", 48581, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Invalid Destination Addr Packets Received cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidDestAddrPktsRcvdCum", prop)
prop = PropMeta("str", "invalidDestAddrPktsRcvdMax", "invalidDestAddrPktsRcvdMax", 48584, PropCategory.IMPLICIT_MAX)
prop.label = "Invalid Destination Addr Packets Received maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidDestAddrPktsRcvdMax", prop)
prop = PropMeta("str", "invalidDestAddrPktsRcvdMin", "invalidDestAddrPktsRcvdMin", 48583, PropCategory.IMPLICIT_MIN)
prop.label = "Invalid Destination Addr Packets Received minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidDestAddrPktsRcvdMin", prop)
prop = PropMeta("str", "invalidDestAddrPktsRcvdPer", "invalidDestAddrPktsRcvdPer", 48582, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Invalid Destination Addr Packets Received periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidDestAddrPktsRcvdPer", prop)
prop = PropMeta("str", "invalidDestAddrPktsRcvdRate", "invalidDestAddrPktsRcvdRate", 48589, PropCategory.IMPLICIT_RATE)
prop.label = "Invalid Destination Addr Packets Received rate"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidDestAddrPktsRcvdRate", prop)
prop = PropMeta("str", "invalidDestAddrPktsRcvdSpct", "invalidDestAddrPktsRcvdSpct", 48586, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Invalid Destination Addr Packets Received suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidDestAddrPktsRcvdSpct", prop)
prop = PropMeta("str", "invalidDestAddrPktsRcvdThr", "invalidDestAddrPktsRcvdThr", 48587, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Invalid Destination Addr Packets Received thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("invalidDestAddrPktsRcvdThr", prop)
prop = PropMeta("str", "invalidDestAddrPktsRcvdTr", "invalidDestAddrPktsRcvdTr", 48588, PropCategory.IMPLICIT_TREND)
prop.label = "Invalid Destination Addr Packets Received trend"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidDestAddrPktsRcvdTr", prop)
prop = PropMeta("str", "invalidSrcAddrPktsRcvdAvg", "invalidSrcAddrPktsRcvdAvg", 48606, PropCategory.IMPLICIT_AVG)
prop.label = "Invalid Source Addr Packets Received average value"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidSrcAddrPktsRcvdAvg", prop)
prop = PropMeta("str", "invalidSrcAddrPktsRcvdCum", "invalidSrcAddrPktsRcvdCum", 48602, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Invalid Source Addr Packets Received cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidSrcAddrPktsRcvdCum", prop)
prop = PropMeta("str", "invalidSrcAddrPktsRcvdMax", "invalidSrcAddrPktsRcvdMax", 48605, PropCategory.IMPLICIT_MAX)
prop.label = "Invalid Source Addr Packets Received maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidSrcAddrPktsRcvdMax", prop)
prop = PropMeta("str", "invalidSrcAddrPktsRcvdMin", "invalidSrcAddrPktsRcvdMin", 48604, PropCategory.IMPLICIT_MIN)
prop.label = "Invalid Source Addr Packets Received minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidSrcAddrPktsRcvdMin", prop)
prop = PropMeta("str", "invalidSrcAddrPktsRcvdPer", "invalidSrcAddrPktsRcvdPer", 48603, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Invalid Source Addr Packets Received periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidSrcAddrPktsRcvdPer", prop)
prop = PropMeta("str", "invalidSrcAddrPktsRcvdRate", "invalidSrcAddrPktsRcvdRate", 48610, PropCategory.IMPLICIT_RATE)
prop.label = "Invalid Source Addr Packets Received rate"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidSrcAddrPktsRcvdRate", prop)
prop = PropMeta("str", "invalidSrcAddrPktsRcvdSpct", "invalidSrcAddrPktsRcvdSpct", 48607, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Invalid Source Addr Packets Received suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidSrcAddrPktsRcvdSpct", prop)
prop = PropMeta("str", "invalidSrcAddrPktsRcvdThr", "invalidSrcAddrPktsRcvdThr", 48608, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Invalid Source Addr Packets Received thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("invalidSrcAddrPktsRcvdThr", prop)
prop = PropMeta("str", "invalidSrcAddrPktsRcvdTr", "invalidSrcAddrPktsRcvdTr", 48609, PropCategory.IMPLICIT_TREND)
prop.label = "Invalid Source Addr Packets Received trend"
prop.isOper = True
prop.isStats = True
meta.props.add("invalidSrcAddrPktsRcvdTr", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "wrongAreaPktsRcvdAvg", "wrongAreaPktsRcvdAvg", 48627, PropCategory.IMPLICIT_AVG)
prop.label = "Wrong Area Packets Received average value"
prop.isOper = True
prop.isStats = True
meta.props.add("wrongAreaPktsRcvdAvg", prop)
prop = PropMeta("str", "wrongAreaPktsRcvdCum", "wrongAreaPktsRcvdCum", 48623, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Wrong Area Packets Received cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("wrongAreaPktsRcvdCum", prop)
prop = PropMeta("str", "wrongAreaPktsRcvdMax", "wrongAreaPktsRcvdMax", 48626, PropCategory.IMPLICIT_MAX)
prop.label = "Wrong Area Packets Received maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("wrongAreaPktsRcvdMax", prop)
prop = PropMeta("str", "wrongAreaPktsRcvdMin", "wrongAreaPktsRcvdMin", 48625, PropCategory.IMPLICIT_MIN)
prop.label = "Wrong Area Packets Received minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("wrongAreaPktsRcvdMin", prop)
prop = PropMeta("str", "wrongAreaPktsRcvdPer", "wrongAreaPktsRcvdPer", 48624, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Wrong Area Packets Received periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("wrongAreaPktsRcvdPer", prop)
prop = PropMeta("str", "wrongAreaPktsRcvdRate", "wrongAreaPktsRcvdRate", 48631, PropCategory.IMPLICIT_RATE)
prop.label = "Wrong Area Packets Received rate"
prop.isOper = True
prop.isStats = True
meta.props.add("wrongAreaPktsRcvdRate", prop)
prop = PropMeta("str", "wrongAreaPktsRcvdSpct", "wrongAreaPktsRcvdSpct", 48628, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Wrong Area Packets Received suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("wrongAreaPktsRcvdSpct", prop)
prop = PropMeta("str", "wrongAreaPktsRcvdThr", "wrongAreaPktsRcvdThr", 48629, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Wrong Area Packets Received thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("wrongAreaPktsRcvdThr", prop)
prop = PropMeta("str", "wrongAreaPktsRcvdTr", "wrongAreaPktsRcvdTr", 48630, PropCategory.IMPLICIT_TREND)
prop.label = "Wrong Area Packets Received trend"
prop.isOper = True
prop.isStats = True
meta.props.add("wrongAreaPktsRcvdTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
11bdfbe141524cc597b00398c0e83a11f79ed8d3 | fd7863c9f2d1d3ede7a91d50419095224ab4598d | /torinometeo/core/templatetags/core_tags.py | fb03a9fc6bb21c1e2752c1953e5a3ff7b939d914 | [] | no_license | TorinoMeteo/tm-website | 9b80344d83ef2aa7c4c820f2cea093fdaa9c77fb | a6becc62eaf5c96e146431631c0d081600e7c5d3 | refs/heads/master | 2023-08-21T14:46:35.825982 | 2023-08-09T07:25:07 | 2023-08-09T07:25:07 | 31,906,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from django import template
from django.contrib.sites.shortcuts import get_current_site
from sorl.thumbnail.templatetags.thumbnail import ThumbnailNode
register = template.Library()
@register.filter()
def strip_img(html):
import re
TAG_RE = re.compile(r'<img.+?/>')
return TAG_RE.sub('', html)
@register.filter()
def absurl(url):
    request = None  # get_current_site(None) falls back to the SITE_ID setting
return ''.join(['http://', get_current_site(request).domain, str(url)])
@register.inclusion_tag('core/sharethis.html')
def sharethis(relative_url, title=''):
return {'url': relative_url, 'title': title}
def sorl_thumbnail(parser, token):
return ThumbnailNode(parser, token)
register.tag(sorl_thumbnail)
| [
"[email protected]"
] | |
6ccb93f049e1ea71254fac76ac7ef5977ace21c4 | 425b68346e1fbd20ced43a4c1f3bc284d66538f4 | /adanet/core/evaluator.py | 78f6fdc92e1e4b157536cf104b51b5b952588825 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | todun/adanet | b29981f20203660091c122c1c0c7bc684749423c | 74106c51e0602bdd62b643f4d6c42a00142947bc | refs/heads/master | 2020-06-25T00:53:51.073322 | 2019-07-26T21:49:36 | 2019-07-26T22:16:26 | 199,146,094 | 1 | 0 | Apache-2.0 | 2019-07-27T09:46:57 | 2019-07-27T09:46:56 | null | UTF-8 | Python | false | false | 3,261 | py | """An AdaNet evaluator implementation in Tensorflow using a single graph.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import logging
from adanet import tf_compat
import tensorflow as tf
class Evaluator(object):
"""Evaluates candidate ensemble performance.
Args:
input_fn: Input function returning a tuple of: features - Dictionary of
string feature name to `Tensor`. labels - `Tensor` of labels.
steps: Number of steps for which to evaluate the ensembles. If an
`OutOfRangeError` occurs, evaluation stops. If set to None, will iterate
the dataset until all inputs are exhausted.
Returns:
An :class:`adanet.Evaluator` instance.
"""
def __init__(self, input_fn, steps=None):
self._input_fn = input_fn
self._steps = steps
super(Evaluator, self).__init__()
@property
def input_fn(self):
"""Return the input_fn."""
return self._input_fn
@property
def steps(self):
"""Return the number of evaluation steps."""
return self._steps
def evaluate_adanet_losses(self, sess, adanet_losses):
"""Evaluates the given AdaNet objectives on the data from `input_fn`.
The candidates are fed the same batches of features and labels as
provided by `input_fn`, and their losses are computed and summed over
`steps` batches.
Args:
sess: `Session` instance with most recent variable values loaded.
adanet_losses: List of AdaNet loss `Tensors`.
Returns:
List of evaluated AdaNet losses.
"""
evals_completed = 0
if self.steps is None:
logging_frequency = 1000
elif self.steps < 10:
logging_frequency = 1
else:
logging_frequency = math.floor(self.steps / 10.)
adanet_losses = [
tf_compat.v1.metrics.mean(adanet_loss) for adanet_loss in adanet_losses
]
sess.run(tf_compat.v1.local_variables_initializer())
while True:
if self.steps is not None and evals_completed == self.steps:
break
try:
evals_completed += 1
if (evals_completed % logging_frequency == 0 or
self.steps == evals_completed):
logging.info("Ensemble evaluation [%d/%s]", evals_completed,
self.steps or "??")
sess.run(adanet_losses)
except tf.errors.OutOfRangeError:
logging.info("Encountered end of input after %d evaluations",
evals_completed)
break
# Losses are metric op tuples. Evaluating the first element is idempotent.
adanet_losses = [loss[0] for loss in adanet_losses]
return sess.run(adanet_losses)
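# --- editorial sketch (not part of adanet) ----------------------------------
# A minimal usage outline for the API documented above; the toy input_fn is
# an illustrative assumption and reuses the tensorflow import at the top of
# this module. In practice adanet.Estimator builds the candidate loss
# tensors and drives evaluate_adanet_losses itself.
def _toy_input_fn():
  features = {"x": tf.constant([[1.0, 2.0]] * 8)}
  labels = tf.constant([[0.0]] * 8)
  return features, labels

evaluator = Evaluator(input_fn=_toy_input_fn, steps=4)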
| [
"[email protected]"
] | |
d2296d7676651e8e0b3c8042b94248e1dd922d48 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_138/1169.py | f23ae477662433aef243097ffc171b1263c142f0 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | import numpy as np
import sys
def evaluateLeadingTestCaseAndReturnAnswer():
global lines
N = int(lines.pop(0))
NaomiSetWar = sorted(map(lambda x: float(x), lines.pop(0).split(' ')))
KenSetWar = sorted(map(lambda x: float(x), lines.pop(0).split(' ')))
counterWar = 0
NaomiSetDeceitfulWar = list(NaomiSetWar)
KenSetDeceitfulWar = list(KenSetWar)
counterDeceitfulWar = 0
    # Deceitful War: Naomi may lie about her values. If her weakest block
    # cannot beat Ken's weakest, she bluffs so Ken burns his strongest;
    # otherwise she plays it honestly and takes the point.
if N == 1:
if NaomiSetDeceitfulWar[0] < KenSetDeceitfulWar[0]:
counterDeceitfulWar = 0
else:
counterDeceitfulWar = 1
else:
for i in xrange(N):
n = NaomiSetDeceitfulWar.pop(0)
if n < KenSetDeceitfulWar[0]:
KenSetDeceitfulWar.pop(-1)
else:
KenSetDeceitfulWar.pop(0)
counterDeceitfulWar += 1
    # War: Naomi announces honestly from her weakest up. Ken answers with his
    # smallest block that still wins; once Naomi's block exceeds Ken's best,
    # every remaining block of hers scores.
if N == 1:
if NaomiSetWar[0] < KenSetWar[0]:
counterWar = 0
else:
counterWar = 1
else:
for i in xrange(N):
n = NaomiSetWar.pop(0)
if n < KenSetWar[-1]:
for j in KenSetWar:
if j > n:
KenSetWar.pop(KenSetWar.index(j))
break
else:
counterWar += len(NaomiSetWar) + 1
break
return (counterDeceitfulWar, counterWar)
def returnFormattedAnswer(caseNum, x):
g.write('Case #%d: %d %d\n' % (caseNum, x[0], x[1]))
if __name__=='__main__':
if len(sys.argv) != 3:
print 'Provide arg1: input file, arg2: output file.'
else:
f = open(sys.argv[1])
g = file(sys.argv[2], 'w')
lines = map(lambda x: x.strip('\n'), f.readlines())
numOfTestCases = int(lines.pop(0))
for i in xrange(1, numOfTestCases + 1):
returnFormattedAnswer(i, evaluateLeadingTestCaseAndReturnAnswer())
f.close()
g.close() | [
"[email protected]"
] | |
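# --- editorial note ----------------------------------------------------------
# Worked example for the Code Jam solution above, on invented data: with
# Naomi = [0.4, 0.6] and Ken = [0.5, 0.9], honest War lets Ken answer 0.4
# with 0.5 and 0.6 with 0.9 (Naomi scores 0), while in Deceitful War Naomi
# bluffs with 0.4 to burn Ken's 0.9 and then wins 0.6 vs 0.5 (score 1),
# so the expected output line is "Case #1: 1 0".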
87c425297ee0f88d7daca1b9febdf1f89e65da92 | 3529ecaa44a53172094ba13498097057c8972723 | /Questiondir/696.count-binary-substrings/696.count-binary-substrings_123598239.py | 74f334383e8c7d8ab23645fa15bf402b2364784a | [] | no_license | cczhong11/Leetcode-contest-code-downloader | 0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6 | db64a67869aae4f0e55e78b65a7e04f5bc2e671c | refs/heads/master | 2021-09-07T15:36:38.892742 | 2018-02-25T04:15:17 | 2018-02-25T04:15:17 | 118,612,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | class Solution(object):
def countBinarySubstrings(self, s):
"""
:type s: str
:rtype: int
"""
if not s:
return 0
        # leftLength[i]: length of the run of equal characters ending at s[i]
        leftLength = [1]*len(s)
        # rightLength[i]: length of the run of equal characters starting at s[i]
        rightLength = [1]*len(s)
for i in range(1, len(s)):
if s[i] == s[i-1]:
leftLength[i] = leftLength[i-1] + 1
for i in reversed(range(len(s)-1)):
if s[i] == s[i+1]:
rightLength[i] = rightLength[i+1] + 1
ans = 0
for i in range(1, len(s)):
if s[i] == s[i-1]:
continue
            # each boundary between two runs adds min(run lengths) substrings,
            # e.g. "00110011" has boundaries at i = 2, 4, 6, each adding 2 -> 6
            ans += min(leftLength[i-1], rightLength[i])
return ans | [
"[email protected]"
] | |
a4c98aaaef0c88024c048111c781ba6424cc35d7 | 2e07f6b94fc0f7a5cf55002040151b8745fd843d | /privious_learning_code/OS_Handling/os.tempnam() Method.py | d524bb204572241ac441fecbb617774578dd3cfc | [] | no_license | LalithK90/LearningPython | a7e6404e900b7d66c663acc72cde3e3655d54ac7 | ece38fdac88da66c8b76fe710b3df7d8635a3590 | refs/heads/master | 2023-06-09T22:32:16.674821 | 2021-06-27T18:55:00 | 2021-06-27T18:55:00 | 169,513,150 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 572 | py |
# Description
#
# The method tempnam() returns a unique path name that is reasonable for creating a temporary file.
# Syntax
#
# Following is the syntax for tempnam() method −
#
# os.tempnam(dir, prefix)
#
# Parameters
#
# dir − This is the directory where the temporary filename will be created.
#
# prefix − This is the prefix of the generated temporary filename.
#
# Return Value
#
# This method returns a unique path.
# Example
import os
# the prefix 'tut' is prepended to the file name generated under /tmp/dir
tmpfn = os.tempnam('/tmp/dir', 'tut')
print("This is the unique path:")
print(tmpfn) | [
"[email protected]"
] | |
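# --- editorial note ----------------------------------------------------------
# os.tempnam() exists only in Python 2 and raises a RuntimeWarning because
# the returned name can be raced before the file is created. A sketch of the
# safer replacement from the standard tempfile module; the 'tut' prefix is
# carried over from the example above and '/tmp' is an illustrative choice.
import os
import tempfile

fd, path = tempfile.mkstemp(prefix='tut', dir='/tmp')  # file created atomically
print("This is the unique path:")
print(path)
os.close(fd)
os.remove(path)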
16ddceaec23fd6336f6cedccdb2aebf64798ed80 | 2f330fc050de11676ab46b963b7878882e9b6614 | /memsource_cli/models/page_dto_web_hook_dto.py | c508f9487b1dcee988c8b5bd9368135c2fd969fa | [
"Apache-2.0"
] | permissive | zerodayz/memsource-cli-client | 609f48c18a2b6daaa639d4cb8a61da43763b5143 | c2574f1467539a49e6637c874e88d75c7ef789b3 | refs/heads/master | 2020-08-01T12:43:06.497982 | 2019-09-30T11:14:13 | 2019-09-30T11:14:13 | 210,999,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,386 | py | # coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:[email protected]>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.web_hook_dto import WebHookDto # noqa: F401,E501
class PageDtoWebHookDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'total_elements': 'int',
'total_pages': 'int',
'page_size': 'int',
'page_number': 'int',
'number_of_elements': 'int',
'content': 'list[WebHookDto]'
}
attribute_map = {
'total_elements': 'totalElements',
'total_pages': 'totalPages',
'page_size': 'pageSize',
'page_number': 'pageNumber',
'number_of_elements': 'numberOfElements',
'content': 'content'
}
def __init__(self, total_elements=None, total_pages=None, page_size=None, page_number=None, number_of_elements=None, content=None): # noqa: E501
"""PageDtoWebHookDto - a model defined in Swagger""" # noqa: E501
self._total_elements = None
self._total_pages = None
self._page_size = None
self._page_number = None
self._number_of_elements = None
self._content = None
self.discriminator = None
if total_elements is not None:
self.total_elements = total_elements
if total_pages is not None:
self.total_pages = total_pages
if page_size is not None:
self.page_size = page_size
if page_number is not None:
self.page_number = page_number
if number_of_elements is not None:
self.number_of_elements = number_of_elements
if content is not None:
self.content = content
@property
def total_elements(self):
"""Gets the total_elements of this PageDtoWebHookDto. # noqa: E501
:return: The total_elements of this PageDtoWebHookDto. # noqa: E501
:rtype: int
"""
return self._total_elements
@total_elements.setter
def total_elements(self, total_elements):
"""Sets the total_elements of this PageDtoWebHookDto.
:param total_elements: The total_elements of this PageDtoWebHookDto. # noqa: E501
:type: int
"""
self._total_elements = total_elements
@property
def total_pages(self):
"""Gets the total_pages of this PageDtoWebHookDto. # noqa: E501
:return: The total_pages of this PageDtoWebHookDto. # noqa: E501
:rtype: int
"""
return self._total_pages
@total_pages.setter
def total_pages(self, total_pages):
"""Sets the total_pages of this PageDtoWebHookDto.
:param total_pages: The total_pages of this PageDtoWebHookDto. # noqa: E501
:type: int
"""
self._total_pages = total_pages
@property
def page_size(self):
"""Gets the page_size of this PageDtoWebHookDto. # noqa: E501
:return: The page_size of this PageDtoWebHookDto. # noqa: E501
:rtype: int
"""
return self._page_size
@page_size.setter
def page_size(self, page_size):
"""Sets the page_size of this PageDtoWebHookDto.
:param page_size: The page_size of this PageDtoWebHookDto. # noqa: E501
:type: int
"""
self._page_size = page_size
@property
def page_number(self):
"""Gets the page_number of this PageDtoWebHookDto. # noqa: E501
:return: The page_number of this PageDtoWebHookDto. # noqa: E501
:rtype: int
"""
return self._page_number
@page_number.setter
def page_number(self, page_number):
"""Sets the page_number of this PageDtoWebHookDto.
:param page_number: The page_number of this PageDtoWebHookDto. # noqa: E501
:type: int
"""
self._page_number = page_number
@property
def number_of_elements(self):
"""Gets the number_of_elements of this PageDtoWebHookDto. # noqa: E501
:return: The number_of_elements of this PageDtoWebHookDto. # noqa: E501
:rtype: int
"""
return self._number_of_elements
@number_of_elements.setter
def number_of_elements(self, number_of_elements):
"""Sets the number_of_elements of this PageDtoWebHookDto.
:param number_of_elements: The number_of_elements of this PageDtoWebHookDto. # noqa: E501
:type: int
"""
self._number_of_elements = number_of_elements
@property
def content(self):
"""Gets the content of this PageDtoWebHookDto. # noqa: E501
:return: The content of this PageDtoWebHookDto. # noqa: E501
:rtype: list[WebHookDto]
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this PageDtoWebHookDto.
:param content: The content of this PageDtoWebHookDto. # noqa: E501
:type: list[WebHookDto]
"""
self._content = content
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PageDtoWebHookDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PageDtoWebHookDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
c7bfd8a1db51a34b917578014b643d36a745a476 | 3011e024b5f31d6c747a2bd4a143bb6a0eeb1e1d | /chapter03/db_operation_demo/venv/Scripts/easy_install-script.py | 0eb38f841eaf8e215cee3d2d5d44bb0c06bec793 | [] | no_license | yingkun1/python-django | a3084460a83682f3e0848d5b40c881f93961ecc2 | 08c9ed3771eb245ee9ff66f67cf28730d2675bbe | refs/heads/master | 2022-12-11T12:33:20.788524 | 2019-06-12T09:30:59 | 2019-06-12T09:30:59 | 189,977,625 | 1 | 0 | null | 2022-11-22T02:57:01 | 2019-06-03T09:43:30 | Python | UTF-8 | Python | false | false | 456 | py | #!E:\python-django\chapter03\db_operation_demo\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
92d1bf6985a2e1c8234fcedb30626aebc41f5f35 | 5b9f9b4ea1494943e6f7f842df55909599ed1304 | /python/onshape_client/oas/models/card.py | 2bc04cfc5daeba4a0aa5f7f2068893f0356d36fb | [] | no_license | jenniferyoung02/onshape-clients | f50534f033428027515b7fc0b801b1caab4d0aec | 8ee31a17d7af32f105b851e45f69fd4a3006e1ba | refs/heads/master | 2020-09-07T06:44:37.682545 | 2019-10-08T18:52:06 | 2019-10-08T18:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,798 | py | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.104
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Card(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'object': 'str',
'account': 'str',
'customer': 'str',
'metadata': 'dict(str, str)',
'address_city': 'str',
'address_country': 'str',
'address_line1': 'str',
'address_line1_check': 'str',
'address_line2': 'str',
'address_state': 'str',
'address_zip': 'str',
'address_zip_check': 'str',
'available_payout_methods': 'list[str]',
'brand': 'str',
'country': 'str',
'currency': 'str',
'cvc_check': 'str',
'default_for_currency': 'bool',
'dynamic_last4': 'str',
'exp_month': 'int',
'exp_year': 'int',
'fingerprint': 'str',
'funding': 'str',
'last4': 'str',
'name': 'str',
'recipient': 'str',
'status': 'str',
'three_d_secure': 'ThreeDSecure',
'tokenization_method': 'str',
'description': 'str',
'iin': 'str',
'issuer': 'str',
'type': 'str',
'instance_url': 'str'
}
attribute_map = {
'id': 'id',
'object': 'object',
'account': 'account',
'customer': 'customer',
'metadata': 'metadata',
'address_city': 'addressCity',
'address_country': 'addressCountry',
'address_line1': 'addressLine1',
'address_line1_check': 'addressLine1Check',
'address_line2': 'addressLine2',
'address_state': 'addressState',
'address_zip': 'addressZip',
'address_zip_check': 'addressZipCheck',
'available_payout_methods': 'availablePayoutMethods',
'brand': 'brand',
'country': 'country',
'currency': 'currency',
'cvc_check': 'cvcCheck',
'default_for_currency': 'defaultForCurrency',
'dynamic_last4': 'dynamicLast4',
'exp_month': 'expMonth',
'exp_year': 'expYear',
'fingerprint': 'fingerprint',
'funding': 'funding',
'last4': 'last4',
'name': 'name',
'recipient': 'recipient',
'status': 'status',
'three_d_secure': 'threeDSecure',
'tokenization_method': 'tokenizationMethod',
'description': 'description',
'iin': 'iin',
'issuer': 'issuer',
'type': 'type',
'instance_url': 'instanceURL'
}
def __init__(self, id=None, object=None, account=None, customer=None, metadata=None, address_city=None, address_country=None, address_line1=None, address_line1_check=None, address_line2=None, address_state=None, address_zip=None, address_zip_check=None, available_payout_methods=None, brand=None, country=None, currency=None, cvc_check=None, default_for_currency=None, dynamic_last4=None, exp_month=None, exp_year=None, fingerprint=None, funding=None, last4=None, name=None, recipient=None, status=None, three_d_secure=None, tokenization_method=None, description=None, iin=None, issuer=None, type=None, instance_url=None): # noqa: E501
"""Card - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._object = None
self._account = None
self._customer = None
self._metadata = None
self._address_city = None
self._address_country = None
self._address_line1 = None
self._address_line1_check = None
self._address_line2 = None
self._address_state = None
self._address_zip = None
self._address_zip_check = None
self._available_payout_methods = None
self._brand = None
self._country = None
self._currency = None
self._cvc_check = None
self._default_for_currency = None
self._dynamic_last4 = None
self._exp_month = None
self._exp_year = None
self._fingerprint = None
self._funding = None
self._last4 = None
self._name = None
self._recipient = None
self._status = None
self._three_d_secure = None
self._tokenization_method = None
self._description = None
self._iin = None
self._issuer = None
self._type = None
self._instance_url = None
self.discriminator = None
if id is not None:
self.id = id
if object is not None:
self.object = object
if account is not None:
self.account = account
if customer is not None:
self.customer = customer
if metadata is not None:
self.metadata = metadata
if address_city is not None:
self.address_city = address_city
if address_country is not None:
self.address_country = address_country
if address_line1 is not None:
self.address_line1 = address_line1
if address_line1_check is not None:
self.address_line1_check = address_line1_check
if address_line2 is not None:
self.address_line2 = address_line2
if address_state is not None:
self.address_state = address_state
if address_zip is not None:
self.address_zip = address_zip
if address_zip_check is not None:
self.address_zip_check = address_zip_check
if available_payout_methods is not None:
self.available_payout_methods = available_payout_methods
if brand is not None:
self.brand = brand
if country is not None:
self.country = country
if currency is not None:
self.currency = currency
if cvc_check is not None:
self.cvc_check = cvc_check
if default_for_currency is not None:
self.default_for_currency = default_for_currency
if dynamic_last4 is not None:
self.dynamic_last4 = dynamic_last4
if exp_month is not None:
self.exp_month = exp_month
if exp_year is not None:
self.exp_year = exp_year
if fingerprint is not None:
self.fingerprint = fingerprint
if funding is not None:
self.funding = funding
if last4 is not None:
self.last4 = last4
if name is not None:
self.name = name
if recipient is not None:
self.recipient = recipient
if status is not None:
self.status = status
if three_d_secure is not None:
self.three_d_secure = three_d_secure
if tokenization_method is not None:
self.tokenization_method = tokenization_method
if description is not None:
self.description = description
if iin is not None:
self.iin = iin
if issuer is not None:
self.issuer = issuer
if type is not None:
self.type = type
if instance_url is not None:
self.instance_url = instance_url
@property
def id(self):
"""Gets the id of this Card. # noqa: E501
:return: The id of this Card. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Card.
:param id: The id of this Card. # noqa: E501
:type: str
"""
self._id = id
@property
def object(self):
"""Gets the object of this Card. # noqa: E501
:return: The object of this Card. # noqa: E501
:rtype: str
"""
return self._object
@object.setter
def object(self, object):
"""Sets the object of this Card.
:param object: The object of this Card. # noqa: E501
:type: str
"""
self._object = object
@property
def account(self):
"""Gets the account of this Card. # noqa: E501
:return: The account of this Card. # noqa: E501
:rtype: str
"""
return self._account
@account.setter
def account(self, account):
"""Sets the account of this Card.
:param account: The account of this Card. # noqa: E501
:type: str
"""
self._account = account
@property
def customer(self):
"""Gets the customer of this Card. # noqa: E501
:return: The customer of this Card. # noqa: E501
:rtype: str
"""
return self._customer
@customer.setter
def customer(self, customer):
"""Sets the customer of this Card.
:param customer: The customer of this Card. # noqa: E501
:type: str
"""
self._customer = customer
@property
def metadata(self):
"""Gets the metadata of this Card. # noqa: E501
:return: The metadata of this Card. # noqa: E501
:rtype: dict(str, str)
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this Card.
:param metadata: The metadata of this Card. # noqa: E501
:type: dict(str, str)
"""
self._metadata = metadata
@property
def address_city(self):
"""Gets the address_city of this Card. # noqa: E501
:return: The address_city of this Card. # noqa: E501
:rtype: str
"""
return self._address_city
@address_city.setter
def address_city(self, address_city):
"""Sets the address_city of this Card.
:param address_city: The address_city of this Card. # noqa: E501
:type: str
"""
self._address_city = address_city
@property
def address_country(self):
"""Gets the address_country of this Card. # noqa: E501
:return: The address_country of this Card. # noqa: E501
:rtype: str
"""
return self._address_country
@address_country.setter
def address_country(self, address_country):
"""Sets the address_country of this Card.
:param address_country: The address_country of this Card. # noqa: E501
:type: str
"""
self._address_country = address_country
@property
def address_line1(self):
"""Gets the address_line1 of this Card. # noqa: E501
:return: The address_line1 of this Card. # noqa: E501
:rtype: str
"""
return self._address_line1
@address_line1.setter
def address_line1(self, address_line1):
"""Sets the address_line1 of this Card.
:param address_line1: The address_line1 of this Card. # noqa: E501
:type: str
"""
self._address_line1 = address_line1
@property
def address_line1_check(self):
"""Gets the address_line1_check of this Card. # noqa: E501
:return: The address_line1_check of this Card. # noqa: E501
:rtype: str
"""
return self._address_line1_check
@address_line1_check.setter
def address_line1_check(self, address_line1_check):
"""Sets the address_line1_check of this Card.
:param address_line1_check: The address_line1_check of this Card. # noqa: E501
:type: str
"""
self._address_line1_check = address_line1_check
@property
def address_line2(self):
"""Gets the address_line2 of this Card. # noqa: E501
:return: The address_line2 of this Card. # noqa: E501
:rtype: str
"""
return self._address_line2
@address_line2.setter
def address_line2(self, address_line2):
"""Sets the address_line2 of this Card.
:param address_line2: The address_line2 of this Card. # noqa: E501
:type: str
"""
self._address_line2 = address_line2
@property
def address_state(self):
"""Gets the address_state of this Card. # noqa: E501
:return: The address_state of this Card. # noqa: E501
:rtype: str
"""
return self._address_state
@address_state.setter
def address_state(self, address_state):
"""Sets the address_state of this Card.
:param address_state: The address_state of this Card. # noqa: E501
:type: str
"""
self._address_state = address_state
@property
def address_zip(self):
"""Gets the address_zip of this Card. # noqa: E501
:return: The address_zip of this Card. # noqa: E501
:rtype: str
"""
return self._address_zip
@address_zip.setter
def address_zip(self, address_zip):
"""Sets the address_zip of this Card.
:param address_zip: The address_zip of this Card. # noqa: E501
:type: str
"""
self._address_zip = address_zip
@property
def address_zip_check(self):
"""Gets the address_zip_check of this Card. # noqa: E501
:return: The address_zip_check of this Card. # noqa: E501
:rtype: str
"""
return self._address_zip_check
@address_zip_check.setter
def address_zip_check(self, address_zip_check):
"""Sets the address_zip_check of this Card.
:param address_zip_check: The address_zip_check of this Card. # noqa: E501
:type: str
"""
self._address_zip_check = address_zip_check
@property
def available_payout_methods(self):
"""Gets the available_payout_methods of this Card. # noqa: E501
:return: The available_payout_methods of this Card. # noqa: E501
:rtype: list[str]
"""
return self._available_payout_methods
@available_payout_methods.setter
def available_payout_methods(self, available_payout_methods):
"""Sets the available_payout_methods of this Card.
:param available_payout_methods: The available_payout_methods of this Card. # noqa: E501
:type: list[str]
"""
self._available_payout_methods = available_payout_methods
@property
def brand(self):
"""Gets the brand of this Card. # noqa: E501
:return: The brand of this Card. # noqa: E501
:rtype: str
"""
return self._brand
@brand.setter
def brand(self, brand):
"""Sets the brand of this Card.
:param brand: The brand of this Card. # noqa: E501
:type: str
"""
self._brand = brand
@property
def country(self):
"""Gets the country of this Card. # noqa: E501
:return: The country of this Card. # noqa: E501
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this Card.
:param country: The country of this Card. # noqa: E501
:type: str
"""
self._country = country
@property
def currency(self):
"""Gets the currency of this Card. # noqa: E501
:return: The currency of this Card. # noqa: E501
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this Card.
:param currency: The currency of this Card. # noqa: E501
:type: str
"""
self._currency = currency
@property
def cvc_check(self):
"""Gets the cvc_check of this Card. # noqa: E501
:return: The cvc_check of this Card. # noqa: E501
:rtype: str
"""
return self._cvc_check
@cvc_check.setter
def cvc_check(self, cvc_check):
"""Sets the cvc_check of this Card.
:param cvc_check: The cvc_check of this Card. # noqa: E501
:type: str
"""
self._cvc_check = cvc_check
@property
def default_for_currency(self):
"""Gets the default_for_currency of this Card. # noqa: E501
:return: The default_for_currency of this Card. # noqa: E501
:rtype: bool
"""
return self._default_for_currency
@default_for_currency.setter
def default_for_currency(self, default_for_currency):
"""Sets the default_for_currency of this Card.
:param default_for_currency: The default_for_currency of this Card. # noqa: E501
:type: bool
"""
self._default_for_currency = default_for_currency
@property
def dynamic_last4(self):
"""Gets the dynamic_last4 of this Card. # noqa: E501
:return: The dynamic_last4 of this Card. # noqa: E501
:rtype: str
"""
return self._dynamic_last4
@dynamic_last4.setter
def dynamic_last4(self, dynamic_last4):
"""Sets the dynamic_last4 of this Card.
:param dynamic_last4: The dynamic_last4 of this Card. # noqa: E501
:type: str
"""
self._dynamic_last4 = dynamic_last4
@property
def exp_month(self):
"""Gets the exp_month of this Card. # noqa: E501
:return: The exp_month of this Card. # noqa: E501
:rtype: int
"""
return self._exp_month
@exp_month.setter
def exp_month(self, exp_month):
"""Sets the exp_month of this Card.
:param exp_month: The exp_month of this Card. # noqa: E501
:type: int
"""
self._exp_month = exp_month
@property
def exp_year(self):
"""Gets the exp_year of this Card. # noqa: E501
:return: The exp_year of this Card. # noqa: E501
:rtype: int
"""
return self._exp_year
@exp_year.setter
def exp_year(self, exp_year):
"""Sets the exp_year of this Card.
:param exp_year: The exp_year of this Card. # noqa: E501
:type: int
"""
self._exp_year = exp_year
@property
def fingerprint(self):
"""Gets the fingerprint of this Card. # noqa: E501
:return: The fingerprint of this Card. # noqa: E501
:rtype: str
"""
return self._fingerprint
@fingerprint.setter
def fingerprint(self, fingerprint):
"""Sets the fingerprint of this Card.
:param fingerprint: The fingerprint of this Card. # noqa: E501
:type: str
"""
self._fingerprint = fingerprint
@property
def funding(self):
"""Gets the funding of this Card. # noqa: E501
:return: The funding of this Card. # noqa: E501
:rtype: str
"""
return self._funding
@funding.setter
def funding(self, funding):
"""Sets the funding of this Card.
:param funding: The funding of this Card. # noqa: E501
:type: str
"""
self._funding = funding
@property
def last4(self):
"""Gets the last4 of this Card. # noqa: E501
:return: The last4 of this Card. # noqa: E501
:rtype: str
"""
return self._last4
@last4.setter
def last4(self, last4):
"""Sets the last4 of this Card.
:param last4: The last4 of this Card. # noqa: E501
:type: str
"""
self._last4 = last4
@property
def name(self):
"""Gets the name of this Card. # noqa: E501
:return: The name of this Card. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Card.
:param name: The name of this Card. # noqa: E501
:type: str
"""
self._name = name
@property
def recipient(self):
"""Gets the recipient of this Card. # noqa: E501
:return: The recipient of this Card. # noqa: E501
:rtype: str
"""
return self._recipient
@recipient.setter
def recipient(self, recipient):
"""Sets the recipient of this Card.
:param recipient: The recipient of this Card. # noqa: E501
:type: str
"""
self._recipient = recipient
@property
def status(self):
"""Gets the status of this Card. # noqa: E501
:return: The status of this Card. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Card.
:param status: The status of this Card. # noqa: E501
:type: str
"""
self._status = status
@property
def three_d_secure(self):
"""Gets the three_d_secure of this Card. # noqa: E501
:return: The three_d_secure of this Card. # noqa: E501
:rtype: ThreeDSecure
"""
return self._three_d_secure
@three_d_secure.setter
def three_d_secure(self, three_d_secure):
"""Sets the three_d_secure of this Card.
:param three_d_secure: The three_d_secure of this Card. # noqa: E501
:type: ThreeDSecure
"""
self._three_d_secure = three_d_secure
@property
def tokenization_method(self):
"""Gets the tokenization_method of this Card. # noqa: E501
:return: The tokenization_method of this Card. # noqa: E501
:rtype: str
"""
return self._tokenization_method
@tokenization_method.setter
def tokenization_method(self, tokenization_method):
"""Sets the tokenization_method of this Card.
:param tokenization_method: The tokenization_method of this Card. # noqa: E501
:type: str
"""
self._tokenization_method = tokenization_method
@property
def description(self):
"""Gets the description of this Card. # noqa: E501
:return: The description of this Card. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Card.
:param description: The description of this Card. # noqa: E501
:type: str
"""
self._description = description
@property
def iin(self):
"""Gets the iin of this Card. # noqa: E501
:return: The iin of this Card. # noqa: E501
:rtype: str
"""
return self._iin
@iin.setter
def iin(self, iin):
"""Sets the iin of this Card.
:param iin: The iin of this Card. # noqa: E501
:type: str
"""
self._iin = iin
@property
def issuer(self):
"""Gets the issuer of this Card. # noqa: E501
:return: The issuer of this Card. # noqa: E501
:rtype: str
"""
return self._issuer
@issuer.setter
def issuer(self, issuer):
"""Sets the issuer of this Card.
:param issuer: The issuer of this Card. # noqa: E501
:type: str
"""
self._issuer = issuer
@property
def type(self):
"""Gets the type of this Card. # noqa: E501
:return: The type of this Card. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Card.
:param type: The type of this Card. # noqa: E501
:type: str
"""
self._type = type
@property
def instance_url(self):
"""Gets the instance_url of this Card. # noqa: E501
:return: The instance_url of this Card. # noqa: E501
:rtype: str
"""
return self._instance_url
@instance_url.setter
def instance_url(self, instance_url):
"""Sets the instance_url of this Card.
:param instance_url: The instance_url of this Card. # noqa: E501
:type: str
"""
self._instance_url = instance_url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Card):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
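# --- Editor's note: a minimal usage sketch for the generated Card model. ---
# It assumes the generated __init__ (not shown in this excerpt) accepts the
# attributes as keyword arguments and that `openapi_types` maps the attribute
# names consumed by to_dict(); both are standard openapi-generator conventions
# rather than facts visible here.
#
#   card = Card(brand="Visa", last4="4242", exp_month=12, exp_year=2030)
#   card.name = "J. Doe"        # attribute access goes through the setters
#   payload = card.to_dict()    # nested models/lists are serialized recursively
#   print(card)                 # __repr__ -> to_str() -> pprint of the dict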
| [
"[email protected]"
] | |
42e3bab964ade496cdf233fb3d15035714911e4e | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnexquisit.py | a74019d7bf4aac4f29e8dd0df7212f110070044c | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 1,760 | py | ii = [('BentJDO2.py', 3), ('CookGHP3.py', 6), ('RogePAV2.py', 9), ('GodwWSL2.py', 19), ('RogePAV.py', 11), ('FerrSDO3.py', 1), ('WilbRLW.py', 2), ('WilbRLW4.py', 2), ('ProuWCM.py', 1), ('CookGHP.py', 2), ('ShawHDE.py', 3), ('LeakWTI2.py', 1), ('KembFJ1.py', 13), ('WilbRLW5.py', 3), ('PettTHE.py', 1), ('TennAP.py', 3), ('BailJD2.py', 3), ('ChalTPW2.py', 21), ('GellWPT.py', 1), ('AdamWEP.py', 2), ('ClarGE2.py', 3), ('WilkJMC2.py', 1), ('CarlTFR.py', 3), ('LyttELD.py', 6), ('TalfTAC.py', 2), ('RoscTTI3.py', 2), ('AinsWRR3.py', 4), ('CookGHP2.py', 2), ('KiddJAE.py', 2), ('AdamHMM.py', 1), ('BailJD1.py', 1), ('RoscTTI2.py', 10), ('CoolWHM.py', 1), ('CrokTPS.py', 4), ('ClarGE.py', 15), ('LandWPA.py', 2), ('BuckWGM.py', 6), ('GilmCRS.py', 11), ('WestJIT2.py', 1), ('DibdTRL2.py', 20), ('AinsWRR.py', 6), ('MedwTAI.py', 10), ('WadeJEB.py', 4), ('FerrSDO2.py', 2), ('TalfTIT.py', 5), ('GodwWLN.py', 4), ('CoopJBT.py', 1), ('KirbWPW2.py', 1), ('LeakWTI4.py', 2), ('MedwTAI2.py', 2), ('SoutRD.py', 2), ('BuckWGM2.py', 1), ('HowiWRL2.py', 5), ('BailJD3.py', 1), ('WilkJMC.py', 2), ('HogaGMM.py', 26), ('MartHRW.py', 6), ('MackCNH.py', 3), ('WestJIT.py', 1), ('FitzRNS4.py', 3), ('CoolWHM3.py', 1), ('EdgeMHT.py', 5), ('BowrJMM.py', 2), ('RoscTTI.py', 5), ('StorJCC.py', 1), ('KembFJ2.py', 14), ('LewiMJW.py', 4), ('MackCNH2.py', 1), ('BellCHM.py', 18), ('WilbRLW3.py', 2), ('AinsWRR2.py', 3), ('JacoWHI.py', 2), ('ClarGE3.py', 22), ('RogeSIP.py', 1), ('MartHRW2.py', 5), ('DibdTRL.py', 10), ('FitzRNS2.py', 1), ('HogaGMM2.py', 24), ('MartHSI.py', 7), ('EvarJSP.py', 1), ('SadlMLP2.py', 4), ('BowrJMM2.py', 1), ('BeckWRE.py', 6), ('TaylIF.py', 3), ('WordWYR.py', 2), ('DibdTBR.py', 3), ('ChalTPW.py', 11), ('KeigTSS.py', 2), ('ClarGE4.py', 35), ('HowiWRL.py', 8)] | [
"[email protected]"
] | |
6afded1246d1f7062c490a1205ece14505720572 | a8750439f200e4efc11715df797489f30e9828c6 | /codechef/KJCS2018_BOOK.py | b948dc53f60db9e60b446ba500360c8fc4f61da7 | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py |
# -*- coding: utf-8 -*-
# @Date : 2018-09-30 15:05:32
# @Author : raj lath ([email protected])
# @Link : link
# @Version : 1.0.0
from sys import stdin
from itertools import accumulate
max_val=int(10e12)
min_val=int(-10e12)
def read_int() : return int(stdin.readline())
def read_ints() : return [int(x) for x in stdin.readline().split()]
def read_str() : return input()
def read_strs() : return [x for x in stdin.readline().split()]
for _ in range(read_int()):
n, m = read_ints()
book = read_ints()
left = read_ints()
rite = read_ints()
    # Size the arrays to cover every value indexed below; max(rite) alone
    # would raise IndexError if some book entry exceeded every query bound.
    limit = max(max(book), max(rite)) + 1
    beg = [0] * limit
    end = [0] * limit
for x in book:
beg[x] += 1
end[x] += x
begs = list(accumulate(beg))
ends = list(accumulate(end))
for i in range(n):
print(begs[rite[i]] - begs[left[i] - 1], ends[rite[i]] - ends[left[i] - 1])
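# --- Editor's note on the technique above. ---
# beg is a histogram of `book`, so after accumulate(), begs[x] counts the
# entries <= x and ends[x] sums them; each [left, rite] query then costs O(1)
# as a difference of two prefix values. E.g. for book = [2, 5, 2] and the
# query [2, 4]: begs[4] - begs[1] = 2 (both 2s) and ends[4] - ends[1] = 4.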
| [
"[email protected]"
] | |
1568a018a87eb7fb715c30958272328cd5f4e86c | 9f2445e9a00cc34eebcf3d3f60124d0388dcb613 | /2019-12-25-Parametersearch_Hay2011/plotcsv.py | 4cf4ec481715b8ba3cc2c53eac9f70c8528747cc | [] | no_license | analkumar2/Thesis-work | 7ee916d71f04a60afbd117325df588908518b7d2 | 75905427c2a78a101b4eed2c27a955867c04465c | refs/heads/master | 2022-01-02T02:33:35.864896 | 2021-12-18T03:34:04 | 2021-12-18T03:34:04 | 201,130,673 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,228 | py | #exec(open('plotcsv.py').read())
import matplotlib as mpl
mpl.rcParams["savefig.directory"] = '/mnt/c/Analkumar2/Study/Biology/Neuroscience/2018 - 23 PhD Thesis/Thesis work'
import csv
import matplotlib.pyplot as plt
import numpy as np
import os
from neo.io import AxonIO
import pandas as pd
invidx = 1
Injectcurr = 300e-12
preStimTime = 0.5
foldername = os.path.basename(os.getcwd())
Pl = pd.read_csv(f'../../Output/{foldername}/Parametersdf.csv').tail(invidx).iloc[0]
Parameters = {key:Pl[key] for key in Pl.keys()}
Vtrace = list(pd.read_csv(f'../../Output/{foldername}/Vmvecdf.csv').tail(invidx).iloc[0])
ttrace = list(pd.read_csv(f'../../Output/{foldername}/tvecdf.csv').tail(invidx).iloc[0])
def exp_tracef(Injectcurr=150e-12):
global flnme
global exp_sampdur
global exp_samprate
global exp_samppoints
global exp_trace_injend
global exp_trace_injstart
stim1391 = ['Cell 3 of 181016.abf', 'cell 4 of 61016.abf', 'cell 4 of 111016.abf', 'cell 4 of 131016.abf', 'Cell 4 of 181016.abf', 'cell 5 of 61016.abf', 'Cell 5 of 181016.abf']
# flnme = 'Cell 3 of 10717.abf'
flnme = 'cell 4 of 61016.abf'
exp_tracefile = f'../../Raw_data/Deepanjali_data/WT step input cells/{flnme}'
reader = AxonIO(filename=exp_tracefile)
    # Recording segments step in 25 pA increments with segment 4 at 0 pA, so
    # the requested current maps directly to an index (150 pA -> 10, 300 pA -> 16).
    currno = int(Injectcurr*1e12/25+4)
    seg = reader.read_block().segments[currno]  # segment for the requested current step
exp_trace = seg.analogsignals[0]
exp_samprate = float(exp_trace.sampling_rate)
exp_sampdur = float(exp_trace.t_stop) - float(exp_trace.t_start)
exp_samppoints = int(exp_samprate*exp_sampdur)
if flnme in stim1391:
exp_trace_injstart = 139.1e-3
exp_trace_injend = 639.1e-3
else:
exp_trace_injstart = 81.4e-3
exp_trace_injend = 581.4e-3
exp_trace = np.array(exp_trace).flatten()
return exp_trace
exp_trace = exp_tracef(Injectcurr=Injectcurr)
plt.plot(np.linspace(preStimTime-exp_trace_injstart,preStimTime+exp_sampdur-exp_trace_injstart,exp_samppoints), exp_trace*1e-3, label=flnme)
plt.plot(ttrace,Vtrace, label='Model')
plt.title('300pA injection')
plt.axis([0.4, 1.2, -0.100, 0.060])
plt.legend()
plt.xlabel('Time (s)')
plt.ylabel('Membrane potential (V)')  # both traces are plotted in volts (exp_trace is scaled by 1e-3 above)
print(Parameters)
plt.show()
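# --- Editor's note: optional archival step (a sketch, not in the original). ---
# The savefig.directory rcParam set at the top only changes the default folder
# of the interactive save dialog; to save non-interactively one could call,
# before plt.show():
#   plt.savefig(f'../../Output/{foldername}/comparison_300pA.png', dpi=300)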
| [
"[email protected]"
] | |
857d9434c9309e580b2b741fa15785895ffc5948 | 1040b320168c49e3fd784d93ff30923527582d26 | /calm/dsl/api/vm_recovery_point.py | 6c38001e2886ff7ea3c342f1f6e12a8c4d6bc643 | [
"Apache-2.0"
] | permissive | nutanix/calm-dsl | 87eb8a82f202ec0c71b5c8d8fe49db29bdcf2cfc | 56c52702cec4370f551785508d284e5cbe1a744a | refs/heads/master | 2023-08-31T16:43:51.009235 | 2023-08-28T05:20:41 | 2023-08-28T05:20:41 | 227,190,868 | 41 | 59 | Apache-2.0 | 2023-08-28T05:20:43 | 2019-12-10T18:38:58 | Python | UTF-8 | Python | false | false | 195 | py | from .resource import ResourceAPI
class VmRecoveryPointAPI(ResourceAPI):
def __init__(self, connection):
super().__init__(connection, resource_type="nutanix/v1/vm_recovery_points")
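# --- Editor's note: usage sketch for the thin wrapper above. ---
# ResourceAPI (imported at the top) is assumed to supply the generic CRUD
# helpers; the call below is illustrative, not a documented calm-dsl API:
#   api = VmRecoveryPointAPI(connection)
#   response = api.list()   # hypothetical; use whichever helpers ResourceAPI defines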
| [
"[email protected]"
] | |
ed009c63f5a3c7ece89552ccefba440deb74a17f | 92b031f51f1c52c26d93987005b8209d9bb050a1 | /mamba/pymtl/__init__.py | d775f90607b8d0da2431cc3660e6a9caecfb0394 | [] | no_license | cornell-brg/mamba-dac2018 | e9353b142456768fcc0d3a9f9b3d29e162ad508e | 01efaadc704a8abec9fa7d4b668005ee412a2353 | refs/heads/master | 2020-03-19T04:42:27.349346 | 2018-06-27T14:57:58 | 2018-06-27T15:00:29 | 135,856,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 635 | py |
from model.ConstraintTypes import U, M, RD, WR
from model.Connectable import Wire, InVPort, OutVPort, Interface
from model.RTLComponent import RTLComponent
from passes import SimRTLPass, PrintMetadataPass, EventDrivenPass
from datatypes import *
from datatypes import _bitwidths
__all__ = [
'U','M','RD','WR',
'Wire', 'InVPort', 'OutVPort', 'Interface',
'RTLComponent', 'SimRTLPass', 'PrintMetadataPass', 'EventDrivenPass',
'sext', 'zext', 'clog2', 'concat',
'mk_bits',
] + [ "Bits{}".format(x) for x in _bitwidths ]
from datatypes.bits_import import _use_pymtl_bits
if _use_pymtl_bits:
__all__ += [ 'Bits' ]
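# --- Editor's note: with this __init__ in place, client code can import the
# public names from the package root (a sketch; assumes the package is named
# pymtl as in the repo layout, and that 32 is among _bitwidths):
#   from pymtl import RTLComponent, InVPort, OutVPort, Wire, Bits32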
| [
"[email protected]"
] | |
d12f0dfc9cecdc462ead55f260a9a8185bd6b3bc | e573b586a921084f29a36f8e2de5afcae2c65ff8 | /tasks/part_2/shape.py | 2b170227c0348f5c73280085dea6db24fbffca36 | [] | no_license | HannaKulba/AdaptiveTraining_English | e69c8a0c444c1fa72b4783ba837cb3d9dc055d91 | 46497dc6827df37f4ebb69671912ef5b934ab6f0 | refs/heads/master | 2020-12-28T15:05:25.762072 | 2020-02-19T14:39:22 | 2020-02-19T14:39:22 | 238,381,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | n = int(input())
if n == 1:
print('You have chosen a square')
elif n == 2:
print('You have chosen a circle')
elif n == 3:
print('You have chosen a triangle')
elif n == 4:
print('You have chosen a rhombus')
else:
print('There is no such shape!')
# if n == 0:
# print('do not move')
# elif n == 1:
# print('move up')
# elif n == 2:
# print('move down')
# elif n == 3:
# print('move left')
# elif n == 4:
# print('move right')
# else:
# print('error!')
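# --- Editor's note: an equivalent table-driven variant (sketch). ---
# A dict keeps the shape names in one place and makes the fallback explicit;
# the behaviour matches the if/elif chain above.
#   shapes = {1: 'square', 2: 'circle', 3: 'triangle', 4: 'rhombus'}
#   print(f'You have chosen a {shapes[n]}' if n in shapes
#         else 'There is no such shape!')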
| [
"[email protected]"
] | |
c702956a4a2e74d48fbc44229dde4a04631056e3 | ec84619271eac42481231218c9ee653dec99adad | /3. Linear Data Structure/146. Lowercase to Uppercase II.py | 1a3326e9a6fb166e2b6f1f1534d527cf102ead1c | [] | no_license | LingHsiLiu/Algorithm0 | 19a968fffb5466022f9856c36af0364da6472434 | f438e828dc9dd6196ee5809eb8fac21ccb688bf2 | refs/heads/master | 2020-04-04T17:55:48.182172 | 2019-01-02T19:06:57 | 2019-01-02T19:06:57 | 156,142,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # 146. Lowercase to Uppercase II
# Implement an upper method to convert all characters in a string to uppercase.
# You should ignore the characters not in alphabet.
# Example
# Given "abc", return "ABC".
# Given "aBc", return "ABC".
# Given "abC12", return "ABC12".
class Solution:
"""
@param str: A string
@return: A string
"""
def lowercaseToUppercase2(self, str):
        # write your code here
        # str.upper() uppercases alphabetic characters and leaves digits and
        # other non-letters unchanged, which matches the stated examples.
        return str.upper()
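# Quick checks (editor's addition):
#   assert Solution().lowercaseToUppercase2("abc") == "ABC"
#   assert Solution().lowercaseToUppercase2("abC12") == "ABC12"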
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.