Dataset columns (name: dtype, observed range or number of classes):
blob_id: string, length 40-40
directory_id: string, length 40-40
path: string, length 3-616
content_id: string, length 40-40
detected_licenses: sequence, length 0-112
license_type: string, 2 classes
repo_name: string, length 5-115
snapshot_id: string, length 40-40
revision_id: string, length 40-40
branch_name: string, 777 classes
visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
github_id: int64, 4.92k to 681M, nullable
star_events_count: int64, 0 to 209k
fork_events_count: int64, 0 to 110k
gha_license_id: string, 22 classes
gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
gha_language: string, 149 classes
src_encoding: string, 26 classes
language: string, 1 class
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 3 to 10.2M
extension: string, 188 classes
content: string, length 3 to 10.2M
authors: sequence, length 1-1
author_id: string, length 1-132
953b13229117505cbd103e683fe9a6c35683cc7a | 9434997753b4edec70916aa5e8380c27e88ea2ab | /build/lib.macosx-10.5-x86_64-2.7/pydynet/analysis.py | 0bacd711233e1a5843ef9e507974536ee2cdb982 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | thelahunginjeet/pydynet | c58df847db969d8e92d6ef54e4e53a6efdad5283 | d4e739aebf05f8aec5a9601b804842539d6b9e93 | refs/heads/master | 2021-04-18T23:59:42.597132 | 2020-06-12T04:12:24 | 2020-06-12T04:12:24 | 37,340,404 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 11,440 | py |

#!/usr/bin/env python
# encoding: utf-8
"""
analysis.py
@author: Kevin S. Brown (University of Connecticut), Ann M. Hermundstad (UPenn)
Created by Kevin Brown on 2015-03-17.
"""
from numpy import log,exp,mean,abs,log2,sqrt
from numpy import roll,where,histogram,nonzero,delete,zeros_like,array,zeros,newaxis
import networkx as nx
def phi_of_t(y, group=None):
    '''
    For y(i,t), computes:
        phi(t) = |<exp(i*y(t))>|
    where the average is computed only over the rows indicated in group. If
    group is None, the average is computed over the entire array.
    INPUT:
        y: array-like, required
            y should be an N x t array of floats/doubles.
        group : list, optional
            group should be a list of rows of y to include in the calculation.
            if group is None, all rows are used.
    OUTPUT:
        phi : array
            phi will be an array of size 1 x t, representing the synchronization
            index, as a function of time, computed over the input row group
    '''
    if group is None:
        group = list(range(0, y.shape[0]))
    # set up the array exp(i*y)
    eiy = exp(1j*y)
    # do the averaging
    phi = eiy[group, :].mean(axis=0)
    # now the modulus
    return abs(phi)
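# Illustrative usage (not part of the original module): identical phases are
# perfectly synchronized, so phi(t) == 1 for every t.
#   >>> from numpy import zeros
#   >>> phi_of_t(zeros((3, 4)))   # three oscillators, four time steps
#   array([1., 1., 1., 1.])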
def codeword_dictionary(spikes):
    '''
    Computes a dictionary of population codewords from an input spike array.
    Codewords are keyed on word (represented as a string) and entries indicate
    counts.
    INPUT:
        spikes: array-like, required
            should be an N x t array of integers (0 and 1 only)
    OUTPUT:
        codewords: dictionary
            number of times (up to t) that each word appears
        codeseq: list
            order of appearance of codewords; each codeword is assigned an
            arbitrary number between 0 and N(codewords)-1
        codetonum: dictionary
            codeword to numerical mapping in codeseq
    '''
    codeseq = []
    codetonum = {}
    codewords = {}
    N, t = spikes.shape
    current = 0
    for k in range(0, t):
        word = ''.join([str(x) for x in spikes[:, k]])
        if word in codewords:
            codewords[word] += 1
        else:
            codewords[word] = 1
            codetonum[word] = current
            current += 1
        codeseq.append(codetonum[word])
    return codewords, codetonum, codeseq
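# Illustrative example (not in the original module): two neurons, three time
# steps; the column words are '10', '00', '10'.
#   >>> from numpy import array
#   >>> codeword_dictionary(array([[1, 0, 1], [0, 0, 0]]))
#   ({'10': 2, '00': 1}, {'10': 0, '00': 1}, [0, 1, 0])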
def codeword_raster(codewords):
    '''
    Uses a codeword dictionary to produce a codeword raster (codewords x spikes)
    and a numeric array of codeword frequencies.
    '''
    coderast = []
    codenum = []
    for k in codewords:
        coderast.append([int(c) for c in k])
        codenum.append(1.0*codewords[k]/sum(codewords.values()))
    coderast = array(coderast)
    codenum = array(codenum)
    return codenum, coderast
def bin_spikes(spike_array, b=10):
    '''
    Accepts an input integer array of zeros and ones and bins samples along
    the column index. Binned samples are computed according to
    value(bin) = max(bin).
    '''
    n, t = spike_array.shape
    binned_array = zeros((n, t // b), dtype=int)
    binstart = 0
    while t >= binstart + b:
        binvals = spike_array[:, binstart:binstart+b].max(axis=1)
        # one output column per input bin of width b
        binned_array[:, binstart // b] = binvals
        binstart += b
    return binned_array
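# Illustrative example (not in the original module): with b=2, each output
# column is the max over a block of two input columns.
#   >>> from numpy import array
#   >>> bin_spikes(array([[0, 1, 0, 0]]), b=2)
#   array([[1, 0]])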
def isi_stats(spike_array):
    '''
    Accepts an N x t input array of 1's and 0's, with a 1 indicating a spike
    occurred in that time bin and returns the mean and variance of the interspike
    intervals.
    INPUT:
        spike_array : array, required
            spike_array should only contain 1's and 0's; amplitude/phase arrays
            should be pre-converted via convert_to_spikes
    OUTPUT:
        isi_mean : array (N elements)
            ISI mean
        isi_var : array (N elements)
            ISI variance
    '''
    N = spike_array.shape[0]
    isi_mean = zeros(N)
    isi_var = zeros(N)
    for k in range(0, N):
        spike_loc = where(spike_array[k, :] == 1)[0]
        isi_array = spike_loc - roll(spike_loc, 1)
        isi_mean[k] = isi_array[1:].mean()
        isi_var[k] = isi_array[1:].var()
    return isi_mean, isi_var
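# Illustrative example (not in the original module): spikes at columns 2, 5
# and 8 give interspike intervals [3, 3], so mean 3 and variance 0.
#   >>> from numpy import zeros
#   >>> sa = zeros((1, 10), dtype=int)
#   >>> sa[0, [2, 5, 8]] = 1
#   >>> isi_stats(sa)
#   (array([3.]), array([0.]))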
def discrete_entropy(x, est='ML'):
    '''
    Computes the entropy of discrete (integer) data.
    INPUT:
        x: array-like, required
            data for which to compute H[x]
        est: string, optional
            estimator. current options are:
                'ML' : maximum-likelihood (plugin)
                'MM' : Miller-Madow corrected
    OUTPUT:
        H[x]: entropy of x, measured in nats
    '''
    # do the frequency counting
    counts = {}
    for xi in x:
        if xi in counts:
            counts[xi] += 1
        else:
            counts[xi] = 1
    sumpofx = 1.0*sum(counts.values())
    pofx = array(list(counts.values()))/sumpofx
    H_ML = -1*(pofx*log(pofx)).sum()
    if est == 'ML':
        H = H_ML
    if est == 'MM':
        # nonzero bins have already been removed from pofx
        H = H_ML + (len(pofx) - 1.0)/(2.0*len(x))
    return H
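# Worked example (illustrative): for the balanced sample [0, 1, 0, 1] the
# plugin estimate is ln(2) ~= 0.6931 nats, and the Miller-Madow correction
# adds (K - 1)/(2N) = (2 - 1)/(2*4) = 0.125 nats, giving ~0.8181.
#   >>> discrete_entropy([0, 1, 0, 1], est='ML')   # -> ~0.6931
#   >>> discrete_entropy([0, 1, 0, 1], est='MM')   # -> ~0.8181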
def entropy(x, bins=10, est='ML'):
    '''
    Computes the entropy of a continuous (unbinned) set of data x.
    INPUT:
        x: array-like, required
            data for which to compute H[x]
        bins: integer, optional
            number of bins
        est: string, optional
            estimator. current options are:
                'ML': maximum-likelihood (plugin)
                'MM': Miller-Madow corrected
                'JK': Jackknifed estimate (can be slow!)
    OUTPUT:
        H[x] : entropy of x, measured in nats
    '''
    cx = histogram(x, bins)[0]
    pofx = (1.0*cx)/cx.sum()
    # remove zero bins to avoid numerical problems
    pofx = pofx[nonzero(pofx)]
    H_ML = -1*(pofx*log(pofx)).sum()
    if est == 'ML':
        H = H_ML
    if est == 'MM':
        # nonzero bins have already been removed from pofx
        H = H_ML + (len(pofx) - 1.0)/(2.0*len(x))
    if est == 'JK':
        Sc = 0
        for i in range(0, len(x)):
            newx = delete(x, i)
            Sc += entropy(newx, bins, 'ML')
        H_JK = len(x)*H_ML - ((len(x) - 1.0)/len(x))*Sc
        H = H_JK
    return H
def codeword_complexity(spike_array, norm=True):
    '''
    Computes the Lempel-Ziv complexity for a series of codewords. If norm is
    True, the normalized lz_complexity is returned. Also returns the number
    of unique codewords.
    '''
    N, t = spike_array.shape
    # find and count the codewords
    codewords, codetonum, codeseq = codeword_dictionary(spike_array)
    nunique = len(codetonum)
    # compute the non-normalized LZ complexity
    lzc = lz_complexity(codeseq)
    # normalize if desired
    if norm:
        f = 1.0*array(list(codewords.values()))/t
        # source entropy
        h = -sum(f*log2(f))
        # length term
        bn = t/log2(t)
        # normalize
        lzc = lzc/(h*bn)
    return lzc, nunique
def random_lz_complexity(n, p=0.5):
    '''
    Computes the expected Lempel-Ziv complexity for a random sequence of length
    n and expected probability of generating a 1 = p. Useful for normalizing
    the raw lz_complexity. This function will behave poorly if p is identically
    0 or 1. Therefore, it would be best to estimate p from real (finite length)
    strings using pseudocounts.
    INPUT:
        n : int, required
            length of the random sequence
        p : float, optional
            probability of seeing a 1 in the sequence
    '''
    # source entropy
    h = -p*log2(p) - (1-p)*log2(1-p)
    # expected LZ complexity of binary representations of real numbers
    bn = n/log2(n)
    return h*bn
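# Worked example (illustrative): for an unbiased source (p = 0.5) the entropy
# term is h = 1 bit, so the expectation reduces to n/log2(n); for n = 1024
# that is 1024/10 = 102.4.
#   >>> random_lz_complexity(1024, p=0.5)
#   102.4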
def lz_complexity(s):
    '''
    Lempel-Ziv complexity as described in Kaspar and Schuster, Phys. Rev. A.
    The input iterable (see below) does not have to be binary (2-element), but
    most applications of LZ complexity have used strings of 0s and 1s.
    INPUT:
        s : string, list, or tuple, required
            sequence to calculate complexity for
    '''
    i, k, l = 0, 1, 1
    k_max = 1
    n = len(s) - 1
    lzc = 1
    while True:
        if s[i + k - 1] == s[l + k - 1]:
            k += 1
            if l + k >= n - 1:
                lzc += 1
                break
        else:
            if k > k_max:
                k_max = k
            i += 1
            if i == l:
                lzc += 1
                l += k_max
                if l + 1 > n:
                    break
                else:
                    i = 0
                    k = 1
                    k_max = 1
            else:
                k = 1
    return lzc
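# Illustrative example: a constant sequence adds no new phrases after the
# first symbol, so the complexity stays low.
#   >>> lz_complexity('0000000000')
#   2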
def complexity(spike_array, method='lz_norm'):
    '''
    Complexity measure for each node's spiking pattern. Could dispatch to a
    variety of measures. Returns an array of length equal to spike_array.shape[0].
    '''
    N, T = spike_array.shape
    c = zeros(N)
    if method == 'lz_norm':
        for i in range(0, N):
            # spike string
            s = ''.join([str(x) for x in spike_array[i, :]])
            # probability of generating a 1 (with pseudocounts)
            p = (sum(spike_array[i, :]) + 1.0)/(T + 2.0)
            # compute normalized LZ complexity
            c[i] = 1.0*lz_complexity(s)/random_lz_complexity(T, p)
    if method == 'lz':
        for i in range(0, N):
            # spike string
            s = ''.join([str(x) for x in spike_array[i, :]])
            # non-normalized lz complexity
            c[i] = lz_complexity(s)
    return c
def node_assortativity(net, attribute, jackknife=True, atype='numeric'):
    '''
    Computes the assortativity coefficient and optional sampling error (via the
    jackknife) for the desired attribute over the network net. In addition, this
    only works as expected for unweighted, undirected graphs.
    This function assumes the input nodes are not already decorated with the attribute;
    this will almost always be the case when the attribute arises as a post-simulation
    calculation on the dynamics of the network.
    INPUT:
        net: PulseOscillatorNetwork (or networkx graph), required
            input network
        attribute : dictionary, required
            key/value pairs for the attribute; keys should be valid node names
            in the network net
        jackknife : bool, optional
            set to True to compute the expected sampling variance
        atype : string, optional
            set to 'numeric' for numeric (integer or floating point) attributes
            and 'categorical' for categorical attributes
    OUTPUT:
        r : float
            numerical attribute assortativity coefficient (-1 < r <= 1)
        sigmar : float, optional
            jackknife standard deviation of r
    '''
    # set the correct assortativity function
    if atype == 'numeric':
        afunc = nx.numeric_assortativity_coefficient
    else:
        afunc = nx.attribute_assortativity_coefficient
    # create a new graph
    G = nx.Graph()
    # add nodes from the network, with attributes
    for n in net.nodes():
        G.add_node(n, value=attribute[n])
    G.add_edges_from(net.edges())
    r = afunc(G, 'value')
    if jackknife:
        sigmarsq = 0.0
        # remove one edge at a time, recompute, then add it back
        # (iterate over a list so removal does not disturb the edge view)
        for e in list(G.edges()):
            G.remove_edge(e[0], e[1])
            sigmarsq += (afunc(G, 'value') - r)**2
            G.add_edge(e[0], e[1])
        return r, sqrt(sigmarsq/len(G.edges()))
    else:
        return r
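# Illustrative usage (assumes networkx 2.x and a toy graph standing in for a
# PulseOscillatorNetwork):
#   >>> net = nx.path_graph(4)
#   >>> attr = {0: 1, 1: 1, 2: 2, 3: 2}
#   >>> r, sigmar = node_assortativity(net, attr)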
| [
"[email protected]"
] | |
7821a32281f931b75f37a80413b9ec794a2804e3 | 1377e0c1b2200f5130b149ff77cf0fda5a1d2aa9 | /src/pmr2/client/script.py | 6cbae7a5414bf1624b23dd63f667976f1e80867c | [] | no_license | alan-wu/pmr2.client | c97ef8371be931da281eba33082d649ce65e1201 | 3dc6afa008159acaa5b8bde4b291920ea3eceb3d | refs/heads/master | 2020-04-04T21:19:54.317615 | 2014-09-01T02:50:32 | 2014-09-01T02:50:52 | 156,282,171 | 0 | 0 | null | 2018-11-05T20:58:31 | 2018-11-05T20:58:30 | null | UTF-8 | Python | false | false | 7,941 | py |

import os.path
import traceback
import json
import code
import pdb
import webbrowser
from urllib import quote_plus
from requests_oauthlib.oauth1_session import TokenRequestDenied
try:
import readline
except ImportError:
pass
from pmr2.client import Client
from pmr2.client import DemoAuthClient
HOME = os.path.expanduser('~')
CONFIG_FILENAME = os.path.join(HOME, '.pmr2clirc')
PMR2ROOT = 'http://staging.physiomeproject.org'
CONSUMER_KEY = 'ovYoqjlJLrpCcEWcIFyxtqRS'
CONSUMER_SECRET = 'fHssEYMWZzgo6JWUBh4l1bhd'
DEFAULT_SCOPE = quote_plus(
'http://staging.physiomeproject.org/oauth_scope/collection,'
'http://staging.physiomeproject.org/oauth_scope/search,'
'http://staging.physiomeproject.org/oauth_scope/workspace_tempauth,'
'http://staging.physiomeproject.org/oauth_scope/workspace_full'
)
class Cli(object):
token_key = ''
token_secret = ''
active = False
state = None
_debug = 0
last_response = None
def __init__(self,
site=PMR2ROOT,
consumer_key=CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET,
scope=DEFAULT_SCOPE,
):
self.auth_client = DemoAuthClient(site, consumer_key, consumer_secret)
@property
def debug(self):
return self._debug
@debug.setter
def debug(self, value):
if isinstance(value, int):
self._debug = value
if isinstance(value, basestring):
if value.lower() in ('false', 'no', '0',):
self._debug = 0
else:
self._debug = 1
def build_config(self):
return {
'token_key':
self.auth_client.session._client.client.resource_owner_key,
'token_secret':
self.auth_client.session._client.client.resource_owner_secret,
'debug': self.debug,
'scope': DEFAULT_SCOPE,
}
def load_config(self, filename=CONFIG_FILENAME):
try:
with open(filename, 'r') as fd:
config = json.load(fd)
except IOError:
print("Fail to open configuration file.")
config = self.build_config()
except ValueError:
print("Fail to decode JSON configuration. Using default values.")
config = self.build_config()
token = config.get('token_key', '')
secret = config.get('token_secret', '')
self.auth_client.session._client.client.resource_owner_key = token
self.auth_client.session._client.client.resource_owner_secret = secret
self.debug = config.get('debug', 0)
self.scope = config.get('scope', DEFAULT_SCOPE)
return token and secret
def save_config(self, filename=CONFIG_FILENAME):
try:
with open(filename, 'wb') as fd:
json.dump(self.build_config(), fd)
except IOError:
print("Error saving configuration")
def get_access(self):
# get user to generate one.
try:
self.auth_client.fetch_request_token(scope=self.scope)
except Exception as e:
print('Fail to request temporary credentials.')
return
target = self.auth_client.authorization_url()
webbrowser.open(target)
verifier = raw_input('Please enter the verifier: ')
self.auth_client.set_verifier(verifier=verifier)
token = self.auth_client.fetch_access_token()
return True
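    # The method above is the standard three-legged OAuth 1.0a flow: fetch
    # temporary request credentials for the requested scope, send the user to
    # the authorization URL in a browser and collect the verifier they are
    # shown, then trade the verified request token for access credentials.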
def do_help(self, arg=''):
"""
Print this message.
"""
print('Basic demo commands:')
print('')
for name in sorted(dir(self)):
if not name.startswith('do_'):
continue
obj = getattr(self, name)
if not callable(obj):
continue
print(name[3:])
print(obj.__doc__)
def do_console(self, arg=''):
"""
Start the interactive python console.
"""
console = code.InteractiveConsole(locals=locals())
result = console.interact('')
def do_dashboard(self, arg=''):
"""
List out the features available on the dashboard.
"""
dashboard = self.client(endpoint='dashboard')
if not arg:
for k, v in dashboard.value().items():
print('%s\t%s\t%s' % (k, v['label'], v['target']))
return
self.state = dashboard.get(arg)
        print('Acquired state "%s"; use console to interact.' % arg)
def do_list_workspace(self, arg=''):
"""
Returns a list of workspaces within your private workspace
container.
"""
dashboard = self.client(endpoint='dashboard')
state = dashboard.get('workspace-home')
for i in state.value():
print('"%s"\t%s' % (i['title'], i['target']))
def do_raw(self, arg=''):
"""
Open a target URL to receive raw API output.
"""
a = arg.split(None, 1)
url = ''.join(a[:1])
data = ''.join(a[1:])
if not url:
print("URL is required.")
return
if not data:
self.state = self.client(url)
else:
self.state = self.client(url, data=data)
print(self.client.last_response.json())
def do_property(self, arg=''):
"""
Set property for this object.
"""
permitted = ['debug']
a = arg.split()
if len(a) < 1:
print("need both key and values.")
return
args = list(arg.split())
prop = args.pop(0)
if len(a) < 2:
            print('%s = %s' % (prop, getattr(self, prop)))
return
if prop not in permitted:
print("'%s' cannot be set") % prop
return
setattr(self, prop, ' '.join(args))
def shell(self):
while self.active:
try:
raw = raw_input('pmr2cli> ')
if not raw:
continue
rawargs = raw.split(None, 1)
command = rawargs.pop(0)
obj = getattr(self, 'do_' + command, None)
if callable(obj):
obj(*rawargs)
else:
print("Invalid command, try 'help'.")
except EOFError:
self.active = False
print('')
except KeyboardInterrupt:
print('\nGot interrupt signal.')
self.active = False
except ValueError:
print("Couldn't decode json.")
# print("Status was %d") % self.last_response.status_code
print("Use console to check `self.last_response` for details.")
except:
print(traceback.format_exc())
if self.debug:
pdb.post_mortem()
def run(self):
access = self.load_config()
if not access:
try:
access = self.get_access()
except TokenRequestDenied as e:
print('Fail to validate the verifier.')
if not access:
self.save_config()
return
self.client = Client(PMR2ROOT,
session=self.auth_client.session, use_default_headers=True)
try:
self.client()
except ValueError as e:
# JSON decoding error
print('Credentials are invalid and are purged. Quitting')
self.auth_client.session._client.client.resource_owner_key = ''
self.auth_client.session._client.client.resource_owner_secret = ''
self.scope = DEFAULT_SCOPE
self.save_config()
return
self.active = True
print('Starting PMR2 Demo Shell...')
self.save_config()
self.shell()
if __name__ == '__main__':
cli = Cli()
cli.run()
| [
"[email protected]"
] | |
f5e09da800b6be9e3ad3cd52937aa943b1c2ee6d | f087d996fd8164dc4fcf9b312533e51bd42029ae | /products/urls.py | cf8edd1c4698904055f1302df72e09dd2c83a3fe | [] | no_license | Mohamed-Kudratov/Furniture_store | 364abc300a3c00b999d54e45badfc7c8ca998e90 | 98754515937c1d7d934a75f0fe6e5f600a69b5e4 | refs/heads/main | 2023-07-18T04:40:16.770467 | 2021-08-30T15:31:16 | 2021-08-30T15:31:16 | 399,190,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py |

from django.urls import path
from products.views import ProductListView
app_name = 'products'
urlpatterns = [
path('', ProductListView.as_view(), name='list')
]
| [
"[email protected]"
] | |
9d0fe1cb7381c95d5401a723dc80ac5b70db6e8e | 4d9bb813b23f59da3e53e9e5932b9b4f2ec2f876 | /backend/manage.py | bb67affcea9ec7200a162f7d747ce71d48c5541c | [] | no_license | crowdbotics-apps/mobile-27-dev-6716 | 79939276c5ff7f54a7fb35a4267a5a0663f8ff87 | bbc168bb35c98604d7afbf074b4d6068790c1192 | refs/heads/master | 2022-11-11T10:25:45.638298 | 2020-06-27T06:41:06 | 2020-06-27T06:41:06 | 275,303,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py |

#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_27_dev_6716.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
75ccd59f1058f4fa224b44f4ba3b6b7670f1bca4 | c8c77f6cc6c032daf179ea2138e4dda5473b426b | /s3/s3-python-example-download-file.py | 44226fc6b8e67109cfe37b2a8ae8611f54e25ad1 | [] | no_license | arunmastermind/AWS-examples-using-BOTO3 | b411a6c96011ab58a66952a53fa2938cb58d5135 | e8390094374c10902bab016a21caba75ea179b5a | refs/heads/master | 2020-09-30T13:34:33.657621 | 2019-12-11T12:37:44 | 2019-12-11T12:37:44 | 227,297,211 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 404 | py |

import boto3
import botocore
BUCKET_NAME = 'my-bucket' # replace with your bucket name
KEY = 'my_image_in_s3.jpg' # replace with your object key
s3 = boto3.resource('s3')
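# Bucket.download_file(Key, Filename) streams the object to the local path;
# the handler below swallows only a 404 (missing key) and re-raises any other
# ClientError (403, throttling, ...) unchanged.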
try:
s3.Bucket(BUCKET_NAME).download_file(KEY, 'my_local_image.jpg')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
        raise
| [
"[email protected]"
] | |
9def36becf9665b78190a6e896d8622be917634c | 4668b8330bb287eef380f990cce3d076bf9456df | /venv/lib/python3.6/site-packages/ray/__init__.py | eb02bacfc63ecab548d282500c5d067bd6463a88 | [] | no_license | Ali-Khakpash/redis-flask-training | 1f7bb1745f224c752bbdb338f4bb4da5ad65f3fb | 1d5a59a97486e734cb7b08ddb40c8aaeddd429d8 | refs/heads/master | 2020-11-25T10:19:25.553265 | 2020-03-12T19:03:36 | 2020-03-12T19:03:36 | 228,612,175 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,271 | py |

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from os.path import dirname
import sys
# MUST add pickle5 to the import path because it will be imported by some
# raylet modules.
if "pickle5" in sys.modules:
raise ImportError("Ray must be imported before pickle5 because Ray "
"requires a specific version of pickle5 (which is "
"packaged along with Ray).")
# Add the directory containing pickle5 to the Python path so that we find the
# pickle5 version packaged with ray and not a pre-existing pickle5.
pickle5_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "pickle5_files")
sys.path.insert(0, pickle5_path)
# Expose ray ABI symbols which may be dependent by other shared
# libraries such as _streaming.so. See BUILD.bazel:_raylet
so_path = os.path.join(dirname(__file__), "_raylet.so")
if os.path.exists(so_path):
import ctypes
from ctypes import CDLL
CDLL(so_path, ctypes.RTLD_GLOBAL)
# MUST import ray._raylet before pyarrow to initialize some global variables.
# It seems the library related to memory allocation in pyarrow will destroy the
# initialization of grpc if we import pyarrow at first.
# NOTE(JoeyJiang): See https://github.com/ray-project/ray/issues/5219 for more
# details.
import ray._raylet # noqa: E402
if "pyarrow" in sys.modules:
raise ImportError("Ray must be imported before pyarrow because Ray "
"requires a specific version of pyarrow (which is "
"packaged along with Ray).")
# Add the directory containing pyarrow to the Python path so that we find the
# pyarrow version packaged with ray and not a pre-existing pyarrow.
pyarrow_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "pyarrow_files")
sys.path.insert(0, pyarrow_path)
# See https://github.com/ray-project/ray/issues/131.
helpful_message = """
If you are using Anaconda, try fixing this problem by running:
conda install libgcc
"""
try:
import pyarrow # noqa: F401
# pyarrow is not imported inside of _raylet because of the issue described
# above. In order for Cython to compile _raylet, pyarrow is set to None
# in _raylet instead, so we give _raylet a real reference to it here.
# We first do the attribute checks here so that building the documentation
# succeeds without fully installing ray..
# TODO(edoakes): Fix this.
if hasattr(ray, "_raylet") and hasattr(ray._raylet, "pyarrow"):
ray._raylet.pyarrow = pyarrow
except ImportError as e:
if ((hasattr(e, "msg") and isinstance(e.msg, str)
and ("libstdc++" in e.msg or "CXX" in e.msg))):
# This code path should be taken with Python 3.
e.msg += helpful_message
elif (hasattr(e, "message") and isinstance(e.message, str)
and ("libstdc++" in e.message or "CXX" in e.message)):
# This code path should be taken with Python 2.
condition = (hasattr(e, "args") and isinstance(e.args, tuple)
and len(e.args) == 1 and isinstance(e.args[0], str))
if condition:
e.args = (e.args[0] + helpful_message, )
else:
if not hasattr(e, "args"):
e.args = ()
elif not isinstance(e.args, tuple):
e.args = (e.args, )
e.args += (helpful_message, )
raise
from ray._raylet import (
ActorCheckpointID,
ActorClassID,
ActorID,
ClientID,
Config as _Config,
JobID,
WorkerID,
FunctionID,
ObjectID,
TaskID,
UniqueID,
) # noqa: E402
_config = _Config()
from ray.profiling import profile # noqa: E402
from ray.state import (global_state, jobs, nodes, tasks, objects, timeline,
object_transfer_timeline, cluster_resources,
available_resources, errors) # noqa: E402
from ray.worker import (
LOCAL_MODE,
SCRIPT_MODE,
WORKER_MODE,
connect,
disconnect,
get,
get_gpu_ids,
get_resource_ids,
get_webui_url,
init,
is_initialized,
put,
register_custom_serializer,
remote,
shutdown,
wait,
) # noqa: E402
import ray.internal # noqa: E402
import ray.projects # noqa: E402
# We import ray.actor because some code is run in actor.py which initializes
# some functions in the worker.
import ray.actor # noqa: F401
from ray.actor import method # noqa: E402
from ray.runtime_context import _get_runtime_context # noqa: E402
# Ray version string.
__version__ = "0.8.0"
__all__ = [
"global_state",
"jobs",
"nodes",
"tasks",
"objects",
"timeline",
"object_transfer_timeline",
"cluster_resources",
"available_resources",
"errors",
"LOCAL_MODE",
"PYTHON_MODE",
"SCRIPT_MODE",
"WORKER_MODE",
"__version__",
"_config",
"_get_runtime_context",
"actor",
"connect",
"disconnect",
"get",
"get_gpu_ids",
"get_resource_ids",
"get_webui_url",
"init",
"internal",
"is_initialized",
"method",
"profile",
"projects",
"put",
"register_custom_serializer",
"remote",
"shutdown",
"wait",
]
# ID types
__all__ += [
"ActorCheckpointID",
"ActorClassID",
"ActorID",
"ClientID",
"JobID",
"WorkerID",
"FunctionID",
"ObjectID",
"TaskID",
"UniqueID",
]
import ctypes # noqa: E402
# Windows only
if hasattr(ctypes, "windll"):
# Makes sure that all child processes die when we die. Also makes sure that
# fatal crashes result in process termination rather than an error dialog
# (the latter is annoying since we have a lot of processes). This is done
# by associating all child processes with a "job" object that imposes this
# behavior.
(lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501
| [
"[email protected]"
] | |
7896761a2d9876cb6e6e8e6d766e7a025d168829 | 747eeeed1056b69a8bde6364ee9bf266523f19e5 | /Important libraries/Numpy.py | 940902f2074d483296c520004216c369c0df94d0 | [] | no_license | LittleAndroidBunny/Python_Cheatsheet_Nohar_Batit | d18a77d455474834da99c11e763beea598947f7c | a53f5e3a635bb47012fceb50efd43ad124eff180 | refs/heads/main | 2023-06-10T10:40:54.746084 | 2021-07-03T23:25:17 | 2021-07-03T23:25:17 | 382,595,063 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,146 | py |

# -*- coding: utf-8 -*-
"""colab-tutorial.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/cs231n/cs231n.github.io/blob/master/python-colab.ipynb
#CS231n Python Tutorial With Google Colab
"""
"""##Introduction
Python is a great general-purpose programming language on its own, but with the help of a few popular libraries (
numpy, scipy, matplotlib) it becomes a powerful environment for scientific computing.
We expect that many of you will have some experience with Python and numpy; for the rest of you, this section will
serve as a quick crash course both on the Python programming language and on the use of Python for scientific
computing.
Some of you may have previous knowledge in Matlab, in which case we also recommend the numpy for Matlab users page (
https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html).
In this tutorial, we will cover:
* Basic Python: Basic data types (Containers, Lists, Dictionaries, Sets, Tuples), Functions, Classes
* Numpy: Arrays, Array indexing, Datatypes, Array math, Broadcasting
* Matplotlib: Plotting, Subplots, Images
* IPython: Creating notebooks, Typical workflows
## A Brief Note on Python Versions
As of January 1, 2020, Python has [officially dropped support](https://www.python.org/doc/sunset-python-2/) for
`python2`. We'll be using Python 3.7 for this iteration of the course. You can check your Python version at the
command line by running `python --version`. In Colab, we can enforce the Python version by clicking `Runtime ->
Change Runtime Type` and selecting `python3`. Note that as of April 2020, Colab uses Python 3.6.9 which should run
everything without any errors. """
# !python --version
"""##Basics of Python
Python is a high-level, dynamically typed multiparadigm programming language. Python code is often said to be almost
like pseudocode, since it allows you to express very powerful ideas in very few lines of code while being very
readable. As an example, here is an implementation of the classic quicksort algorithm in Python: """
def quicksort(arr):
if len(arr) <= 1:
return arr
pivot = arr[len(arr) // 2]
left = [l for l in arr if l < pivot]
middle = [m for m in arr if m == pivot]
right = [p for p in arr if p > pivot]
return quicksort(left) + middle + quicksort(right)
print(quicksort([3, 6, 8, 10, 1, 2, 1]))
"""###Basic data types
####Numbers
Integers and floats work as you would expect from other languages:
"""
x = 3
print(x, type(x))
print(x + 1) # Addition
print(x - 1) # Subtraction
print(x * 2) # Multiplication
print(x ** 2) # Exponentiation
x += 1
print(x)
x *= 2
print(x)
y = 2.5
print(type(y))
print(y, y + 1, y * 2, y ** 2)
"""Note that unlike many languages, Python does not have unary increment (x++) or decrement (x--) operators.
Python also has built-in types for long integers and complex numbers; you can find all of the details in the [
documentation](https://docs.python.org/3.7/library/stdtypes.html#numeric-types-int-float-long-complex).
####Booleans
Python implements all of the usual operators for Boolean logic, but uses English words rather than symbols (`&&`,
`||`, etc.): """
t, f = True, False
print(type(t))
"""Now we let's look at the operations:"""
print(t and f) # Logical AND;
print(t or f) # Logical OR;
print(not t) # Logical NOT;
print(t != f) # Logical XOR;
"""####Strings"""
hello = 'hello' # String literals can use single quotes
world = "world" # or double quotes; it does not matter
print(hello, len(hello))
hw = hello + ' ' + world # String concatenation
print(hw)
hw12 = '{} {} {}'.format(hello, world, 12) # string formatting
print(hw12)
"""String objects have a bunch of useful methods; for example:"""
s = "hello"
print(s.capitalize()) # Capitalize a string
print(s.upper()) # Convert a string to uppercase; prints "HELLO"
print(s.rjust(7)) # Right-justify a string, padding with spaces
print(s.center(7)) # Center a string, padding with spaces
print(s.replace('l', '(ell)')) # Replace all instances of one substring with another
print(' world '.strip()) # Strip leading and trailing whitespace
"""You can find a list of all string methods in the [documentation](
https://docs.python.org/3.7/library/stdtypes.html#string-methods).
###Containers
Python includes several built-in container types: lists, dictionaries, sets, and tuples.
####Lists
A list is the Python equivalent of an array, but is resizeable and can contain elements of different types:
"""
xs = [3, 1, 2] # Create a list
print(xs, xs[2])
print(xs[-1]) # Negative indices count from the end of the list; prints "2"
xs[2] = 'foo' # Lists can contain elements of different types
print(xs)
xs.append('bar') # Add a new element to the end of the list
print(xs)
x = xs.pop() # Remove and return the last element of the list
print(x, xs)
"""As usual, you can find all the gory details about lists in the [documentation](
https://docs.python.org/3.7/tutorial/datastructures.html#more-on-lists).
####Slicing
In addition to accessing list elements one at a time, Python provides concise syntax to access sublists; this is
known as slicing: """
nums = list(range(5)) # range is a built-in function that creates a list of integers
print(nums) # Prints "[0, 1, 2, 3, 4]"
print(nums[2:4]) # Get a slice from index 2 to 4 (exclusive); prints "[2, 3]"
print(nums[2:]) # Get a slice from index 2 to the end; prints "[2, 3, 4]"
print(nums[:2]) # Get a slice from the start to index 2 (exclusive); prints "[0, 1]"
print(nums[:]) # Get a slice of the whole list; prints ["0, 1, 2, 3, 4]"
print(nums[:-1]) # Slice indices can be negative; prints ["0, 1, 2, 3]"
nums[2:4] = [8, 9] # Assign a new sublist to a slice
print(nums) # Prints "[0, 1, 8, 9, 4]"
"""####Loops
You can loop over the elements of a list like this:
"""
animals = ['cat', 'dog', 'monkey']
for animal in animals:
print(animal)
"""If you want access to the index of each element within the body of a loop, use the built-in `enumerate` function:"""
animals = ['cat', 'dog', 'monkey']
for idx, animal in enumerate(animals):
print('#{}: {}'.format(idx + 1, animal))
"""####List comprehensions:
When programming, frequently we want to transform one type of data into another. As a simple example, consider the
following code that computes square numbers: """
nums = [0, 1, 2, 3, 4]
squares = []
for x in nums:
squares.append(x ** 2)
print(squares)
"""You can make this code simpler using a list comprehension:"""
nums = [0, 1, 2, 3, 4]
squares = [x ** 2 for x in nums]
print(squares)
"""List comprehensions can also contain conditions:"""
nums = [0, 1, 2, 3, 4]
even_squares = [x ** 2 for x in nums if x % 2 == 0]
print(even_squares)
"""####Dictionaries
A dictionary stores (key, value) pairs, similar to a `Map` in Java or an object in Javascript. You can use it like this:
"""
d = {'cat': 'cute', 'dog': 'furry'} # Create a new dictionary with some data
print(d['cat']) # Get an entry from a dictionary; prints "cute"
print('cat' in d) # Check if a dictionary has a given key; prints "True"
d['fish'] = 'wet' # Set an entry in a dictionary
print(d['fish']) # Prints "wet"
# print(d['monkey']) # KeyError: 'monkey' not a key of d
print(d.get('monkey', 'N/A')) # Get an element with a default; prints "N/A"
print(d.get('fish', 'N/A')) # Get an element with a default; prints "wet"
del d['fish'] # Remove an element from a dictionary
print(d.get('fish', 'N/A')) # "fish" is no longer a key; prints "N/A"
"""You can find all you need to know about dictionaries in the [documentation](
https://docs.python.org/2/library/stdtypes.html#dict).
It is easy to iterate over the keys in a dictionary:
"""
d = {'person': 2, 'cat': 4, 'spider': 8}
for animal, legs in d.items():
print('A {} has {} legs'.format(animal, legs))
"""Dictionary comprehensions: These are similar to list comprehensions, but allow you to easily construct
dictionaries. For example: """
nums = [0, 1, 2, 3, 4]
even_num_to_square = {x: x ** 2 for x in nums if x % 2 == 0}
print(even_num_to_square)
"""####Sets
A set is an unordered collection of distinct elements. As a simple example, consider the following:
"""
animals = {'cat', 'dog'}
print('cat' in animals) # Check if an element is in a set; prints "True"
print('fish' in animals) # prints "False"
animals.add('fish') # Add an element to a set
print('fish' in animals)
print(len(animals)) # Number of elements in a set;
animals.add('cat') # Adding an element that is already in the set does nothing
print(len(animals))
animals.remove('cat') # Remove an element from a set
print(len(animals))
"""_Loops_: Iterating over a set has the same syntax as iterating over a list; however since sets are unordered,
you cannot make assumptions about the order in which you visit the elements of the set: """
animals = {'cat', 'dog', 'fish'}
for idx, animal in enumerate(animals):
print('#{}: {}'.format(idx + 1, animal))
"""Set comprehensions: Like lists and dictionaries, we can easily construct sets using set comprehensions:"""
from math import sqrt
print({int(sqrt(x)) for x in range(30)})
"""####Tuples
A tuple is an (immutable) ordered list of values. A tuple is in many ways similar to a list; one of the most
important differences is that tuples can be used as keys in dictionaries and as elements of sets, while lists cannot.
Here is a trivial example: """
d = {(x, x + 1): x for x in range(10)} # Create a dictionary with tuple keys
t = (5, 6) # Create a tuple
print(type(t))
print(d[t])
print(d[(1, 2)])
# t[0] = 1
"""###Functions
Python functions are defined using the `def` keyword. For example:
"""
def sign(w):
if w > 0:
return 'positive'
elif w < 0:
return 'negative'
else:
return 'zero'
for x in [-1, 0, 1]:
print(sign(x))
"""We will often define functions to take optional keyword arguments, like this:"""
def hello(name, loud=False):
if loud:
print('HELLO, {}'.format(name.upper()))
else:
print('Hello, {}!'.format(name))
hello('Bob')
hello('Fred', loud=True)
"""###Classes
The syntax for defining classes in Python is straightforward:
"""
class Greeter:
# Constructor
def __init__(self, name):
self.name = name # Create an instance variable
# Instance method
def greet(self, loud=False):
if loud:
print('HELLO, {}'.format(self.name.upper()))
else:
print('Hello, {}!'.format(self.name))
g = Greeter('Fred') # Construct an instance of the Greeter class
g.greet() # Call an instance method; prints "Hello, Fred"
g.greet(loud=True) # Call an instance method; prints "HELLO, FRED!"
"""##Numpy
Numpy is the core library for scientific computing in Python. It provides a high-performance multidimensional array
object, and tools for working with these arrays. If you are already familiar with MATLAB, you might find this [
tutorial](http://wiki.scipy.org/NumPy_for_Matlab_Users) useful to get started with Numpy.
To use Numpy, we first need to import the `numpy` package:
"""
import numpy as np
"""###Arrays
A numpy array is a grid of values, all of the same type, and is indexed by a tuple of nonnegative integers. The
number of dimensions is the rank of the array; the shape of an array is a tuple of integers giving the size of the
array along each dimension.
We can initialize numpy arrays from nested Python lists, and access elements using square brackets:
"""
a = np.array([1, 2, 3]) # Create a rank 1 array
print(type(a), a.shape, a[0], a[1], a[2])
a[0] = 5 # Change an element of the array
print(a)
b = np.array([[1, 2, 3], [4, 5, 6]]) # Create a rank 2 array
print(b)
print(b.shape)
print(b[0, 0], b[0, 1], b[1, 0])
"""Numpy also provides many functions to create arrays:"""
a = np.zeros((2, 2)) # Create an array of all zeros
print(a)
b = np.ones((1, 2)) # Create an array of all ones
print(b)
c = np.full((2, 2), 7) # Create a constant array
print(c)
d = np.eye(2) # Create a 2x2 identity matrix
print(d)
e = np.random.random((2, 2)) # Create an array filled with random values
print(e)
"""###Array indexing
Numpy offers several ways to index into arrays.
Slicing: Similar to Python lists, numpy arrays can be sliced. Since arrays may be multidimensional, you must specify
a slice for each dimension of the array: """
import numpy as np
# Create the following rank 2 array with shape (3, 4)
# [[ 1 2 3 4]
# [ 5 6 7 8]
# [ 9 10 11 12]]
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
# Use slicing to pull out the subarray consisting of the first 2 rows
# and columns 1 and 2; b is the following array of shape (2, 2):
# [[2 3]
# [6 7]]
b = a[:2, 1:3]
print(b)
"""A slice of an array is a view into the same data, so modifying it will modify the original array."""
print(a[0, 1])
b[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1]
print(a[0, 1])
"""You can also mix integer indexing with slice indexing. However, doing so will yield an array of lower rank than
the original array. Note that this is quite different from the way that MATLAB handles array slicing: """
# Create the following rank 2 array with shape (3, 4)
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
print(a)
"""Two ways of accessing the data in the middle row of the array.
Mixing integer indexing with slices yields an array of lower rank,
while using only slices yields an array of the same rank as the
original array:
"""
row_r1 = a[1, :] # Rank 1 view of the second row of a
row_r2 = a[1:2, :] # Rank 2 view of the second row of a
row_r3 = a[[1], :] # Rank 2 view of the second row of a
print(row_r1, row_r1.shape)
print(row_r2, row_r2.shape)
print(row_r3, row_r3.shape)
# We can make the same distinction when accessing columns of an array:
col_r1 = a[:, 1]
col_r2 = a[:, 1:2]
print(col_r1, col_r1.shape)
print()
print(col_r2, col_r2.shape)
"""Integer array indexing: When you index into numpy arrays using slicing, the resulting array view will always be a
subarray of the original array. In contrast, integer array indexing allows you to construct arbitrary arrays using
the data from another array. Here is an example: """
a = np.array([[1, 2], [3, 4], [5, 6]])
# An example of integer array indexing.
# The returned array will have shape (3,) and
print(a[[0, 1, 2], [0, 1, 0]])
# The above example of integer array indexing is equivalent to this:
print(np.array([a[0, 0], a[1, 1], a[2, 0]]))
# When using integer array indexing, you can reuse the same
# element from the source array:
print(a[[0, 0], [1, 1]])
# Equivalent to the previous integer array indexing example
print(np.array([a[0, 1], a[0, 1]]))
"""One useful trick with integer array indexing is selecting or mutating one element from each row of a matrix:"""
# Create a new array from which we will select elements
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
print(a)
# Create an array of indices
b = np.array([0, 2, 0, 1])
# Select one element from each row of a using the indices in b
print(a[np.arange(4), b]) # Prints "[ 1 6 7 11]"
# Mutate one element from each row of a using the indices in b
a[np.arange(4), b] += 10
print(a)
"""Boolean array indexing: Boolean array indexing lets you pick out arbitrary elements of an array. Frequently this
type of indexing is used to select the elements of an array that satisfy some condition. Here is an example: """
import numpy as np
a = np.array([[1, 2], [3, 4], [5, 6]])
bool_idx = (a > 2) # Find the elements of a that are bigger than 2;
# this returns a numpy array of Booleans of the same
# shape as a, where each slot of bool_idx tells
# whether that element of a is > 2.
print(bool_idx)
# We use boolean array indexing to construct a rank 1 array
# consisting of the elements of a corresponding to the True values
# of bool_idx
print(a[bool_idx])
# We can do all of the above in a single concise statement:
print(a[a > 2])
"""For brevity we have left out a lot of details about numpy array indexing; if you want to know more you should read
the documentation.
###Datatypes
Every numpy array is a grid of elements of the same type. Numpy provides a large set of numeric datatypes that you
can use to construct arrays. Numpy tries to guess a datatype when you create an array, but functions that construct
arrays usually also include an optional argument to explicitly specify the datatype. Here is an example: """
x = np.array([1, 2]) # Let numpy choose the datatype
y = np.array([1.0, 2.0]) # Let numpy choose the datatype
z = np.array([1, 2], dtype=np.int64) # Force a particular datatype
print(x.dtype, y.dtype, z.dtype)
"""You can read all about numpy datatype in the [documentation](
http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html).
###Array math
Basic mathematical functions operate elementwise on arrays, and are available both as operator overloads and as
functions in the numpy module: """
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = np.array([[5, 6], [7, 8]], dtype=np.float64)
# Elementwise sum; both produce the array
print(x + y)
print(np.add(x, y))
# Elementwise difference; both produce the array
print(x - y)
print(np.subtract(x, y))
# Elementwise product; both produce the array
print(x * y)
print(np.multiply(x, y))
# Elementwise division; both produce the array
# [[ 0.2 0.33333333]
# [ 0.42857143 0.5 ]]
print(x / y)
print(np.divide(x, y))
# Elementwise square root; produces the array
# [[ 1. 1.41421356]
# [ 1.73205081 2. ]]
print(np.sqrt(x))
"""Note that unlike MATLAB, `*` is elementwise multiplication, not matrix multiplication. We instead use the dot
function to compute inner products of vectors, to multiply a vector by a matrix, and to multiply matrices. dot is
available both as a function in the numpy module and as an instance method of array objects: """
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
v = np.array([9, 10])
w = np.array([11, 12])
# Inner product of vectors; both produce 219
print(v.dot(w))
print(np.dot(v, w))
"""You can also use the `@` operator which is equivalent to numpy's `dot` operator."""
print(v @ w)
# Matrix / vector product; both produce the rank 1 array [29 67]
print(x.dot(v))
print(np.dot(x, v))
print(x @ v)
# Matrix / matrix product; both produce the rank 2 array
# [[19 22]
# [43 50]]
print(x.dot(y))
print(np.dot(x, y))
print(x @ y)
"""Numpy provides many useful functions for performing computations on arrays; one of the most useful is `sum`:"""
x = np.array([[1, 2], [3, 4]])
print(np.sum(x)) # Compute sum of all elements; prints "10"
print(np.sum(x, axis=0)) # Compute sum of each column; prints "[4 6]"
print(np.sum(x, axis=1)) # Compute sum of each row; prints "[3 7]"
"""You can find the full list of mathematical functions provided by numpy in the [documentation](
http://docs.scipy.org/doc/numpy/reference/routines.math.html).
Apart from computing mathematical functions using arrays, we frequently need to reshape or otherwise manipulate data
in arrays. The simplest example of this type of operation is transposing a matrix; to transpose a matrix, simply use
the T attribute of an array object: """
print(x)
print("transpose\n", x.T)
v = np.array([[1, 2, 3]])
print(v)
print("transpose\n", v.T)
"""###Broadcasting
Broadcasting is a powerful mechanism that allows numpy to work with arrays of different shapes when performing
arithmetic operations. Frequently we have a smaller array and a larger array, and we want to use the smaller array
multiple times to perform some operation on the larger array.
For example, suppose that we want to add a constant vector to each row of a matrix. We could do it like this:
"""
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = np.empty_like(x) # Create an empty matrix with the same shape as x
# Add the vector v to each row of the matrix x with an explicit loop
for i in range(4):
y[i, :] = x[i, :] + v
print(y)
"""This works; however when the matrix `x` is very large, computing an explicit loop in Python could be slow. Note
that adding the vector v to each row of the matrix `x` is equivalent to forming a matrix `vv` by stacking multiple
copies of `v` vertically, then performing elementwise summation of `x` and `vv`. We could implement this approach
like this: """
vv = np.tile(v, (4, 1)) # Stack 4 copies of v on top of each other
print(vv) # Prints "[[1 0 1]
# [1 0 1]
# [1 0 1]
# [1 0 1]]"
y = x + vv # Add x and vv elementwise
print(y)
"""Numpy broadcasting allows us to perform this computation without actually creating multiple copies of v. Consider
this version, using broadcasting: """
import numpy as np
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = x + v # Add v to each row of x using broadcasting
print(y)
"""The line `y = x + v` works even though `x` has shape `(4, 3)` and `v` has shape `(3,)` due to broadcasting; this
line works as if v actually had shape `(4, 3)`, where each row was a copy of `v`, and the sum was performed
elementwise.
Broadcasting two arrays together follows these rules:
1. If the arrays do not have the same rank, prepend the shape of the lower rank array with 1s until both shapes have
the same length. 2. The two arrays are said to be compatible in a dimension if they have the same size in the
dimension, or if one of the arrays has size 1 in that dimension. 3. The arrays can be broadcast together if they are
compatible in all dimensions. 4. After broadcasting, each array behaves as if it had shape equal to the elementwise
maximum of shapes of the two input arrays. 5. In any dimension where one array had size 1 and the other array had
size greater than 1, the first array behaves as if it were copied along that dimension
If this explanation does not make sense, try reading the explanation from the [documentation](
http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) or this [explanation](
http://wiki.scipy.org/EricsBroadcastingDoc).
Functions that support broadcasting are known as universal functions. You can find the list of all universal
functions in the [documentation](http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs).
Here are some applications of broadcasting:
"""
# Compute outer product of vectors
v = np.array([1, 2, 3]) # v has shape (3,)
w = np.array([4, 5]) # w has shape (2,)
# To compute an outer product, we first reshape v to be a column
# vector of shape (3, 1); we can then broadcast it against w to yield
# an output of shape (3, 2), which is the outer product of v and w:
print(np.reshape(v, (3, 1)) * w)
# Add a vector to each row of a matrix
x = np.array([[1, 2, 3], [4, 5, 6]])
# x has shape (2, 3) and v has shape (3,) so they broadcast to (2, 3),
# giving the following matrix:
print(x + v)
# Add a vector to each column of a matrix
# x has shape (2, 3) and w has shape (2,).
# If we transpose x then it has shape (3, 2) and can be broadcast
# against w to yield a result of shape (3, 2); transposing this result
# yields the final result of shape (2, 3) which is the matrix x with
# the vector w added to each column. Gives the following matrix:
print((x.T + w).T)
# Another solution is to reshape w to be a row vector of shape (2, 1);
# we can then broadcast it directly against x to produce the same
# output.
print(x + np.reshape(w, (2, 1)))
# Multiply a matrix by a constant:
# x has shape (2, 3). Numpy treats scalars as arrays of shape ();
# these can be broadcast together to shape (2, 3), producing the
# following array:
print(x * 2)
"""Broadcasting typically makes your code more concise and faster, so you should strive to use it where possible.
This brief overview has touched on many of the important things that you need to know about numpy, but is far from
complete. Check out the [numpy reference](http://docs.scipy.org/doc/numpy/reference/) to find out much more about
numpy.
##Matplotlib
Matplotlib is a plotting library. In this section give a brief introduction to the `matplotlib.pyplot` module,
which provides a plotting system similar to that of MATLAB. """
import matplotlib.pyplot as plt
"""By running this special iPython command, we will be displaying plots inline:"""
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
"""###Plotting
The most important function in `matplotlib` is plot, which allows you to plot 2D data. Here is a simple example:
"""
# Compute the x and y coordinates for points on a sine curve
x = np.arange(0, 3 * np.pi, 0.1)
y = np.sin(x)
# Plot the points using matplotlib
plt.plot(x, y)
"""With just a little bit of extra work we can easily plot multiple lines at once, and add a title, legend,
and axis labels: """
y_sin = np.sin(x)
y_cos = np.cos(x)
# Plot the points using matplotlib
plt.plot(x, y_sin)
plt.plot(x, y_cos)
plt.xlabel('x axis label')
plt.ylabel('y axis label')
plt.title('Sine and Cosine')
plt.legend(['Sine', 'Cosine'])
"""###Subplots
You can plot different things in the same figure using the subplot function. Here is an example:
"""
# Compute the x and y coordinates for points on sine and cosine curves
x = np.arange(0, 3 * np.pi, 0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
# Set up a subplot grid that has height 2 and width 1,
# and set the first such subplot as active.
plt.subplot(2, 1, 1)
# Make the first plot
plt.plot(x, y_sin)
plt.title('Sine')
# Set the second subplot as active, and make the second plot.
plt.subplot(2, 1, 2)
plt.plot(x, y_cos)
plt.title('Cosine')
# Show the figure.
plt.show()
"""You can read much more about the `subplot` function in the [documentation](
http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.subplot). """
| [
"[email protected]"
] | |
f5d74b47c47767172a0a4f417aabf004bcfcd977 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_192/ch50_2020_03_31_18_24_04_548537.py | 160206f489154d64f31662ca60f92d5e9b86c3d2 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py |

def junta_nome_sobrenome(nome, sobrenome):
    # join the first name and the surname with a single space between them
    n_s = nome + ' ' + sobrenome
    return n_s
| [
"[email protected]"
] | |
d83403b3e62411169dc322e3b39e4f5ae49837ef | dbcef3da83c75c61542c85cfb02dd2b97d5316b5 | /016 3Sum Closest/3Sum-Closest.py | f35017735b3e80d7a98e0f12802a93dacd5e1931 | [] | no_license | wecoderBao/own-leetcode-solution-python | bbf3efad162f542f510293e614bbbadf67dcd899 | ef1760df16d2e298162a33a2ab27a537f8527446 | refs/heads/master | 2021-01-24T10:52:41.404740 | 2018-03-26T03:34:04 | 2018-03-26T03:34:04 | 123,067,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,007 | py |

"""
Given an array S of n integers, find three integers in S such that the sum is closest to a given number, target.
Return the sum of the three integers. You may assume that each input would have exactly one solution.
For example, given array S = {-1 2 1 -4}, and target = 1.
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
"""
class Solution:
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
def twoSumClosest(nums, start, sum2):
end = len(nums) - 1
distance = abs(sum2 - nums[start] - nums[end])
ans = nums[start] + nums[end]
while start < end:
if nums[start] + nums[end] == sum2:
ans = nums[start] + nums[end]
break
elif nums[start] + nums[end] > sum2:
if abs(sum2 - nums[start] - nums[end]) < distance:
distance = abs(sum2 - nums[start] - nums[end])
ans = nums[start] + nums[end]
end -= 1
elif nums[start] + nums[end] < sum2:
if abs(sum2 - nums[start] - nums[end]) < distance:
distance = abs(sum2 - nums[start] - nums[end])
ans = nums[start] + nums[end]
start += 1
return ans
nums.sort()
result = nums[0] + nums[1] + nums[2]
# float("inf")正无穷 float("-inf")负无穷
distance = float("inf")
for i in range(len(nums)):
if i + 1 < len(nums) - 1:
sum3 = nums[i] + twoSumClosest(nums, i + 1, target - nums[i])
if abs(target - sum3) < distance:
distance = abs(target - sum3)
result = sum3
return result
if __name__ == '__main__':
arr = [-3,-2,-5,3,-4]
s = Solution()
print(s.threeSumClosest(arr, -1))
| [
"[email protected]"
] | |
b23881cd3ec3b09bc5fbeeb2a6134e6300077f74 | 524baf7de05bd3fc5b9d08083cbb0b7b47a67979 | /66.py | 7ea2f8d6d49eac49fd8935d35ebcf0323fa8d74d | [] | no_license | gk90731/100-questions-practice | 1356dd577516567a5c51a4257f59fe01b123e7ff | f855549e037b9924dd6f0370dc2f2a53765d9227 | refs/heads/master | 2020-05-25T14:04:59.642819 | 2019-05-21T12:49:04 | 2019-05-21T12:49:04 | 187,835,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py |

"""Question: Create an English to Portuguese translation program.
The program takes a word from the user as input and translates it using the following dictionary as a vocabulary source.
d = dict(weather = "clima", earth = "terra", rain = "chuva")
Expected output:
Enter word:
earth
terra"""
###########################################################################
d = dict(weather = "clima", earth = "terra", rain = "chuva")
def vocabulary(word):
return d[word]
word = input("Enter word: ")
print(vocabulary(word))
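# Defensive variant (illustrative, beyond the stated exercise): use d.get to
# return a fallback instead of raising KeyError for unknown words, e.g.
#     d.get(word, 'not found')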
| [
"[email protected]"
] | |
526254b60ba5c538d70a8f15f972c21f136bb4f9 | 5094eebd60b2a59114ee493107cb13cf50b04d77 | /manage.py | 295da3731c97b97bfc98d0e1629722f898b7a1f7 | [] | no_license | ryu022304/NLP_100knocks_69 | bb6dd80d80485953697f3f47a91c30750bd77f71 | 3c5448ea8a1d52930b7df67472e1800256569bba | refs/heads/master | 2020-04-08T12:13:10.969238 | 2019-02-17T08:28:40 | 2019-02-17T08:28:40 | 159,337,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py |

#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NLP_100knocks_69.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
6bd0239951439edd729d4ce1d71d7ea2d4fbd1ad | 97e0064a13111eef4709a0b865e58cf9d8804cc1 | /restore_expense.py | 8a1372de0f0fdfc5f069b0ca1fd295e00234f914 | [] | no_license | c1xfr2e/kky_stuff | ee9dc03f985b405b79388b385543747ad490f3aa | 47dc5aafeb8710bebd1486d5a7aff7f669ea94ce | refs/heads/master | 2021-05-28T23:02:11.676345 | 2015-07-02T10:28:24 | 2015-07-02T10:28:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,600 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'zh'
__date__ = '6/30/15'
from pymongo import MongoClient
from bson import ObjectId
import pickle
import datetime
import time
import sys
import logging
import tablib
import xlrd
import decimal
client = MongoClient('mongodb://sa:[email protected]:7900/admin')
db_wukong = client['wukong-release']
db_console = client['console-release']
c_courier = db_wukong['courier']
c_log = db_console['log']
c_expense = db_wukong['expend']
c_withdraw = db_wukong['withdraw']
start_dt = datetime.datetime(2015, 6, 28)
end_dt = datetime.datetime(2015, 6, 30, 14)
start_timestamp = int(time.mktime(start_dt.timetuple()) * 1000)
end_timestamp = int(time.mktime(end_dt.timetuple()) * 1000)
'''
unfreeze_logs = list(c_log.find(
{
'action': 'courier_account',
'arguments.freeze': 'unfreeze',
'created_time': {
'$gte': start_timestamp,
'$lt': end_timestamp
}
}
))
unfreeze_courier_ids = [ ObjectId(log['arguments']['id'][0]) for log in unfreeze_logs]
headers = (
'速递员ID',
'速递员所属校区',
'速递员姓名',
'速递员手机号'
)
couriers = list(c_courier.find(
{
'_id': { '$in': unfreeze_courier_ids }
}
))
lines = []
for c in couriers:
line = (
str(c['_id']),
c.get('school', ''),
c.get('name', ''),
c.get('mobile', '')
)
lines.append(line)
data = tablib.Dataset(*lines, headers=headers)
with open('couriers.xls', 'wb') as f:
f.write(data.xls)
bad_expense = list(c_expense.find(
{
'courier_id': { '$in': unfreeze_courier_ids },
'status': { '$in': ['unprocessed', 'freezed'] }
}
))
'''
bad_expense = list(c_expense.find(
{
'status': { '$in': ['freezed'] }
}
))
bad_withdraw_ids = []
bad_expense_ids = []
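# Roll the freezed expenses back: re-add each fine to the courier's debt, then
# collect the withdraw/expense ids so the withdraws can be reopened and the
# expense records removed below.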
for expense in bad_expense:
fine_amount = expense['fine_amount']
if fine_amount > 0:
result = c_courier.update(
{ '_id': expense['courier_id'] },
{
'$inc': {
'debt': int(fine_amount)
}
}
)
print result
bad_withdraw_ids.append(expense['withdraw_id'])
bad_expense_ids.append(expense['_id'])
result = c_withdraw.update(
{ '_id': { '$in': bad_withdraw_ids} },
{
'$set': {
'status': 'unprocessed',
'unfreezed_time': int(time.time() * 1000)
}
},
multi=True
)
print result
result = c_expense.remove(
{ '_id': { '$in': bad_expense_ids } }
)
print result
| [
"[email protected]"
] | |
1de1dd49bfdc0892c65112b7ef0032830fb8ab54 | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_azure_firewall_fqdn_tags_operations.py | 2a9ee6dd7a76b016f22f8b6f45c0eb67eb0302ba | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 5,081 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallFqdnTagsOperations:
"""AzureFirewallFqdnTagsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_all(
self,
**kwargs
) -> AsyncIterable["models.AzureFirewallFqdnTagListResult"]:
"""Gets all the Azure Firewall FQDN Tags in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallFqdnTagListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.AzureFirewallFqdnTagListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureFirewallFqdnTagListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallFqdnTagListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewallFqdnTags'} # type: ignore
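# Usage sketch (not part of the generated client; a hedged example): assumes an
# authenticated aio NetworkManagementClient for api-version 2019-09-01 whose
# `azure_firewall_fqdn_tags` attribute is an instance of this operations class.
async def _example_list_fqdn_tags(client) -> None:
    # list_all returns an AsyncItemPaged; iterating it yields every FQDN tag
    # across pages.
    async for tag in client.azure_firewall_fqdn_tags.list_all():
        print(tag)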
| [
"[email protected]"
] | |
30119909b42166146f8b4dccfc70438638f747a2 | cfa3f958c8b4c7f8617731c6580c16e8daee6218 | /board/todos/models.py | 1d2a1417eac0374b90f6398deeed334911f10766 | [] | no_license | cdh3261/Django | dd01f9c07c8b501c95445748e5d590565ca68352 | 0003b617ae500cf191e4af5cc8ab5fd06f02f76e | refs/heads/master | 2022-12-22T23:12:41.271650 | 2019-11-04T07:57:46 | 2019-11-04T07:57:46 | 217,967,586 | 0 | 0 | null | 2022-11-22T04:46:53 | 2019-10-28T04:43:09 | Python | UTF-8 | Python | false | false | 238 | py | from django.db import models
# Create your models here.
class Todo(models.Model):
title = models.CharField(max_length=50)
content = models.TextField()
due_date = models.DateField()
author = models.CharField(max_length=50) | [
"[email protected]"
] | |
b4a7e58c3f973d0cd61a90b044f43e9f1ab81be1 | 41658affd8f1b6fd2ffb89ec08ba7fb13fffacd6 | /kbengine/assets/scripts/base/GameRoom.py | ac7864696a42cda106293ef1f62fd201c23b676d | [] | no_license | flyarong/FourDeckCards-kbengine-cocos | 42203d3d825e8828baabee236a0f38e161f144c1 | 1cd40a6d8ffa684a007cfa74fb5bbbce0da49179 | refs/heads/master | 2023-05-26T00:10:33.429000 | 2019-04-16T08:32:12 | 2019-04-16T08:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,369 | py | # -*- coding: utf-8 -*-
import math
import KBEngine
from KBEDebug import *
import time
from datetime import datetime
from interfaces.GameObject import GameObject
from entitymembers.iRoomRules import iRoomRules
from entitymembers.PlayerProxy import PlayerProxy
from BaseEntity import BaseEntity
import json
import const
import switch
import utility
import copy
from Functor import Functor
class GameRoom(BaseEntity, GameObject, iRoomRules):
"""
    This is a game room/table class.
    It runs and maintains the actual game inside a room, e.g. Dou Dizhu, mahjong, etc.
    The room keeps the mailbox of every player in it; through those mailboxes we can push information to their clients.
"""
def __init__(self):
BaseEntity.__init__(self)
GameObject.__init__(self)
iRoomRules.__init__(self)
self.agent = None
self.roomID = utility.gen_room_id()
        # State 0: game not started, 1: a round in progress
        self.state = const.ROOM_WAITING
        # Mailboxes of the players in this room
        self.players_dict = {}
        self.players_list = [None] * self.player_num
        self.origin_players_list = [None] * self.player_num
        # Index of the player who discarded the king (joker) tile
        self.discard_king_idx = -1
        # Dealer index
        self.dealer_idx = 0
        # Index of the player currently in control of the cards
        self.current_idx = 0
        # Index of the player who may act on the current discard; the server waits for him with a time limit
        # Basic room polling timer
        self._poll_timer = None
        # Timer limiting a player's operation time
        self._op_timer = None
        # Timer waiting for player confirmation on the ready screen after a round ends
        self._next_game_timer = None
        # King (joker) tiles (possibly several)
        self.kingTiles = []
        # Prevailing wind
        self.prevailing_wind = const.WIND_EAST
        # How many times each player has been dealer within one circle
        self.dealerNumList = [0] * self.player_num
        self.current_round = 0
        self.all_discard_tiles = []
        # The last player who discarded
        self.last_player_idx = -1
        # Record of every operation since the room's game began (aid, src, des, tile)
        self.op_record = []
        # Record id corresponding to the room's operation record
        self.record_id = -1
        # Players who confirmed to continue
        self.confirm_next_idx = []
        # Initiator of the dismiss-room request
        self.dismiss_room_from = -1
        # Timestamp when the dismiss-room request started
        self.dismiss_room_ts = 0
        # Vote state of the dismiss-room request
        self.dismiss_room_state_list = [0] * self.player_num
        self.dismiss_timer = None
        # Room creation time
        self.roomOpenTime = time.time()
        # List of pending player operations
        self.wait_op_info_list = []
        # Flag for the delayed draw after a kong; e.g. a dismiss-room request arriving during the delay must be rejected, as above
        self.wait_force_delay_kong_draw = False
        # Game record
        self.game_result = {}
        # Current consecutive-dealer multiplier
        self.cur_dealer_mul = self.begin_dealer_mul
        # The club table this room belongs to; only present for club rooms
        self.club_table = None
        # Room destruction timeout timer
        self.timeout_timer = self.add_timer(const.ROOM_TTL, self.timeoutDestroy)
        # Score of the discards in the current trick
        self.curround_score = 0
        # Flag marking the start of a trick
        self.round_start = True
        # Counter of players who cannot beat the current discard (pass)
        self.op_pass_flag = 0
        # Number of winners; incremented whenever a player goes out
        self.player_win_num = 0
        # Discard list of the player in control
        self.controller_discard_list = []
        # Player being waited on to discard
        self.waitIdx = -1
        # Cards each player currently has on the table
        self.deskPokerList = [[],[],[],[]]
        # List of winning players
        self.win_list = []
        # Players whose current round has completely ended
        self.round_end_list = []
        # Each player's score this round (excluding prize score)
        self.curScoreList = [0] * self.player_num
        # Each player's final score this round (excluding prize score)
        self.lastScoreList = [0] * self.player_num
        # Each player's prizes this round
        self.curPrizeList = [0] * self.player_num
def _reset(self):
self.state = const.ROOM_WAITING
self.agent = None
self.players_list = [None] * self.player_num
self.discard_king_idx = -1
self.dealer_idx = 0
self.current_idx = 0
self.player_win_num = 0
self.waitIdx = 0
self.op_pass_flag = 0
self.controller_discard_list = []
self.deskPokerList = [[],[],[],[]]
self.curround_score = 0
self.round_start = True
self.round_end_list = []
self.win_list = []
self._poll_timer = None
self._op_timer = None
self._next_game_timer = None
self.all_discard_tiles = []
self.kingTiles = []
self.current_round = 0
self.confirm_next_idx = []
self.prevailing_wind = const.WIND_EAST
self.dismiss_timer = None
self.dismiss_room_ts = 0
self.dismiss_room_state_list = [0, 0, 0, 0]
self.wait_op_info_list = []
self.cur_dealer_mul = self.begin_dealer_mul
KBEngine.globalData["GameWorld"].delRoom(self)
        # Club table seat info changed
if self.club_table:
self.club_table.seatInfoChanged()
self.club_table.room = None
self.destroySelf()
@property
def prefixLogStr(self):
""" only on Log """
return 'room:{},curround:{}'.format(self.roomID, self.current_round)
@property
def isFull(self):
count = sum([1 for i in self.players_list if i is not None])
return count == self.player_num
@property
def isEmpty(self):
count = sum([1 for i in self.players_list if i is not None])
return count == 0 and self.room_type != const.AGENT_ROOM
@property
def nextIdx(self):
# tryNext = (self.current_idx + 1) % self.player_num
# for j in range(2):
# for i in range(self.player_num):
# if self.player_num > tryNext:
# return tryNext
# tryNext = (tryNext + 1) % self.player_num
return (self.current_idx + 1) % self.player_num
@property
def wreathsList(self):
return [p.wreaths for i,p in enumerate(self.players_list)]
@property
def windsList(self):
return [p.wind for i,p in enumerate(self.players_list)]
@property
def club(self):
try:
if self.club_table:
return self.club_table.club
except:
            # The proxied object may already have been destroyed, e.g. when the club is dismissed
pass
return None
def getSit(self):
for i, j in enumerate(self.players_list):
if j is None:
return i
return None
def sendEmotion(self, avt_mb, eid):
""" 发表情 """
# DEBUG_MSG("Room.Player[%s] sendEmotion: %s" % (self.roomID, eid))
idx = None
for i, p in enumerate(self.players_list):
if p and avt_mb == p.mb:
idx = i
break
if idx is None:
return
for i, p in enumerate(self.players_list):
if p and i != idx:
p.mb.recvEmotion(idx, eid)
def sendMsg(self, avt_mb, mid, msg):
""" 发消息 """
# DEBUG_MSG("Room.Player[%s] sendMsg: %s" % (self.roomID, mid))
idx = None
for i, p in enumerate(self.players_list):
if p and avt_mb == p.mb:
idx = i
break
if idx is None:
return
for i, p in enumerate(self.players_list):
if p and i != idx:
p.mb.recvMsg(idx, mid, msg)
def sendExpression(self, avt_mb, fromIdx, toIdx, eid):
""" 发魔法表情 """
# DEBUG_MSG("Room.Player[%s] sendEmotion: %s" % (self.roomID, eid))
idx = None
for i, p in enumerate(self.players_list):
if p and avt_mb == p.mb:
idx = i
break
if idx is None:
return
for i, p in enumerate(self.players_list):
if p and i != idx:
p.mb.recvExpression(fromIdx, toIdx, eid)
def sendVoice(self, avt_mb, url):
# DEBUG_MSG("Room.Player[%s] sendVoice" % (self.roomID))
idx = None
for i, p in enumerate(self.players_list):
if p and avt_mb.userId == p.userId:
idx = i
break
if idx is None:
return
for i, p in enumerate(self.players_list):
if p and p.mb:
p.mb.recvVoice(idx, url)
def sendAppVoice(self, avt_mb, url, time):
# DEBUG_MSG("Room.Player[%s] sendVoice" % (self.roomID))
idx = None
for i, p in enumerate(self.players_list):
if p and avt_mb.userId == p.userId:
idx = i
break
if idx is None:
return
for i, p in enumerate(self.players_list):
if p and p.mb and i != idx:
p.mb.recvAppVoice(idx, url, time)
def apply_dismiss_room(self, avt_mb):
""" 游戏开始后玩家申请解散房间 """
if self.dismiss_timer is not None:
self.vote_dismiss_room(avt_mb, 1)
return
self.dismiss_room_ts = time.time()
src = None
for i, p in enumerate(self.players_list):
if p.userId == avt_mb.userId:
src = p
break
        # The player who requests the dismissal automatically votes yes
self.dismiss_room_from = src.idx
self.dismiss_room_state_list[src.idx] = 1
def dismiss_callback():
self.saveRoomResult()
self.give_up_record_game()
# self.dropRoom()
self.do_drop_room()
self.dismiss_timer = self.add_timer(const.DISMISS_ROOM_WAIT_TIME, dismiss_callback)
for p in self.players_list:
if p and p.mb and p.userId != avt_mb.userId:
p.mb.req_dismiss_room(src.idx)
def vote_dismiss_room(self, avt_mb, vote):
""" 某位玩家对申请解散房间的投票 """
if self.wait_force_delay_kong_draw:
return
src = None
for p in self.players_list:
if p and p.userId == avt_mb.userId:
src = p
break
self.dismiss_room_state_list[src.idx] = vote
for p in self.players_list:
if p and p.mb:
p.mb.vote_dismiss_result(src.idx, vote)
yes = self.dismiss_room_state_list.count(1)
no = self.dismiss_room_state_list.count(2)
if yes >= 3:
if self.dismiss_timer:
self.cancel_timer(self.dismiss_timer)
self.dismiss_timer = None
self.dismiss_timer = None
self.saveRoomResult()
self.give_up_record_game()
# self.dropRoom()
self.do_drop_room()
if no >= 2:
if self.dismiss_timer:
self.cancel_timer(self.dismiss_timer)
self.dismiss_timer = None
self.dismiss_timer = None
self.dismiss_room_from = -1
self.dismiss_room_ts = 0
self.dismiss_room_state_list = [0,0,0,0]
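    # Vote thresholds (derived from the checks above): three or more "yes"
    # votes (1) save the results and dismiss the room; two or more "no" votes
    # (2) cancel the request and clear the vote state.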
def notify_player_online_status(self, userId, status):
src = -1
for idx, p in enumerate(self.players_list):
if p and p.userId == userId:
p.online = status
src = idx
break
if src == -1:
return
for idx, p in enumerate(self.players_list):
if p and p.mb and p.userId != userId:
p.mb.notifyPlayerOnlineStatus(src, status)
def reqEnterRoom(self, avt_mb, first=False):
"""
defined.
        Called by the client to request entering the room/table
"""
if self.isFull:
avt_mb.enterRoomFailed(const.ENTER_FAILED_ROOM_FULL)
return
if self.room_type == const.CLUB_ROOM:
if self.club and not self.club.isMember(avt_mb.userId):
avt_mb.enterRoomFailed(const.ENTER_FAILED_NOT_CLUB_MEMBER)
return
def _check_user_info(content):
if content is None:
DEBUG_MSG("room:{0},curround:{1} userId:{2} enterRoomFailed callback error: content is None".format(self.roomID, self.current_round, avt_mb.userId))
if not first:
avt_mb.enterRoomFailed(const.CREATE_FAILED_NET_SERVER_ERROR)
return False
try:
data = json.loads(content)
card_cost, diamond_cost = switch.calc_cost(self.game_round, self.getCalCostNeed())
if card_cost > data["card"]:
avt_mb.enterRoomFailed(const.ENTER_FAILED_ROOM_DIAMOND_NOT_ENOUGH)
return False
except:
err, msg, stack = sys.exc_info()
DEBUG_MSG("room:{0},curround:{1} _check_user_info callback error:{2} , exc_info: {3} ,{4}".format(self.roomID, self.current_round, content, err, msg))
avt_mb.enterRoomFailed(const.CREATE_FAILED_OTHER)
return False
return True
def callback():
if self.isDestroyed:
avt_mb.enterRoomFailed(const.ENTER_FAILED_ROOM_DESTROYED)
return
for i, p in enumerate(self.players_list):
if p and p.mb and p.mb.userId == avt_mb.userId:
p.mb = avt_mb
avt_mb.enterRoomSucceed(self, i)
return
DEBUG_MSG("{} userId:{} reqEnterRoom".format(self.prefixLogStr, avt_mb.userId))
idx = self.getSit()
            # Under AA pay mode, several players may reach this point at the same time
if idx is None:
avt_mb.enterRoomFailed(const.ENTER_FAILED_ROOM_FULL)
return
n_player = PlayerProxy(avt_mb, self, idx)
self.players_dict[avt_mb.userId] = n_player
self.players_list[idx] = n_player
            # Club table seat info changed
if self.club_table:
self.club_table.seatInfoChanged()
            # Auto-confirm readiness; no manual prepare needed
if self.hand_prepare == const.AUTO_PREPARE:
self.prepare(avt_mb)
if not first:
self.broadcastEnterRoom(idx)
else:
avt_mb.createRoomSucceed(self)
self.ready_after_prepare()
if switch.DEBUG_BASE:
callback()
else:
if first or self.pay_mode != const.AA_PAY_MODE:
callback()
else:
def _user_info_callback(content):
if _check_user_info(content):
callback()
utility.get_user_info(avt_mb.accountName, _user_info_callback)
def client_prepare(self, avt_mb):
DEBUG_MSG("room:{0},curround:{1} client_prepare userId:{2}".format(self.roomID, self.current_round, avt_mb.userId))
self.prepare(avt_mb)
self.ready_after_prepare()
def prepare(self, avt_mb):
""" 第一局/一局结束后 玩家准备 """
if self.state == const.ROOM_PLAYING or self.state == const.ROOM_TRANSITION:
return
idx = -1
for i, p in enumerate(self.players_list):
if p and p.userId == avt_mb.userId:
idx = i
break
if idx not in self.confirm_next_idx:
self.confirm_next_idx.append(idx)
for p in self.players_list:
if p and p.idx != idx:
p.mb.readyForNextRound(idx)
def ready_after_prepare(self):
if len(self.confirm_next_idx) == self.player_num and self.isFull and self.state == const.ROOM_WAITING:
self.pay2StartGame()
def reqReconnect(self, avt_mb):
DEBUG_MSG("room:{0},curround:{1} avt_mb reqReconnect userid:{2}".format(self.roomID, self.current_round, avt_mb.userId))
if avt_mb.userId not in self.players_dict.keys():
return
DEBUG_MSG("room:{0},curround:{1} avt_mb reqReconnect player:{2} is in room".format(self.roomID, self.current_round, avt_mb.userId))
        # If the game has already started when the player comes back, send the full state
        # If it has not started yet, this is no different from joining the room
player = self.players_dict[avt_mb.userId]
player.mb = avt_mb
player.online = 1
if self.state == const.ROOM_PLAYING or 0 < self.current_round <= self.game_round:
if self.state == const.ROOM_WAITING:
                # Auto-prepare right after reconnecting
self.client_prepare(avt_mb)
rec_room_info = self.get_reconnect_room_dict(player.mb.userId)
player.mb.handle_reconnect(rec_room_info)
if len(self.getTipsCards()) == 0 and self.players_list[self.current_idx].online and not self.round_start:
self.players_list[self.current_idx].mb.doOperation(const.OP_PASS, [])
else:
sit = 0
for idx, p in enumerate(self.players_list):
if p and p.mb:
if p.mb.userId == avt_mb.userId:
sit = idx
break
avt_mb.enterRoomSucceed(self, sit)
def reqLeaveRoom(self, player):
"""
defined.
        Called by the client to request leaving the room/table
"""
DEBUG_MSG("room:{0},curround:{1} reqLeaveRoom userId:{2}, room_type:{3}, state:{4}".format(self.roomID, self.current_round, player.userId, self.room_type, self.state))
if self.state != const.ROOM_WAITING:
DEBUG_MSG("{} reqLeaveRoom: not allow ".format(self.prefixLogStr))
# player.quitRoomFailed(-1)
return
if player.userId in self.players_dict.keys():
n_player = self.players_dict[player.userId]
idx = n_player.idx
if idx == 0 and self.room_type == const.NORMAL_ROOM:
                # The room owner left, so dismiss the room
self.give_up_record_game()
# self.dropRoom()
self.do_drop_room()
else:
n_player.mb.quitRoomSucceed()
self.players_list[idx] = None
del self.players_dict[player.userId]
if idx in self.confirm_next_idx:
self.confirm_next_idx.remove(idx)
                # Notify the other players that this player quit the room
for i, p in enumerate(self.players_list):
if i != idx and p and p.mb:
p.mb.othersQuitRoom(idx)
            # Club table seat info changed
if self.room_type == const.CLUB_ROOM and self.club_table:
self.club_table.seatInfoChanged()
if self.isEmpty:
self.give_up_record_game()
# self.dropRoom()
self.do_drop_room()
def dropRoom(self):
self.dismiss_timer = None
for i,p in enumerate(self.players_list):
if p and p.mb:
try:
p.mb.quitRoomSucceed()
except:
pass
if self.room_type == const.AGENT_ROOM and self.agent:
            # Remove the room from the agent's rooms
if not self.agent.isDestroyed:
self.agent.agentRoomDropped(self.roomID)
try:
            # For an agent-opened room, refund the room cards if not even one round was finished
if switch.DEBUG_BASE == 0 and self.current_round < 1 and self.pay_mode == const.AGENT_PAY_MODE:
card_cost, diamond_cost = switch.calc_cost(self.game_round, self.getCalCostNeed())
def callback(room_id, user_id, content):
try:
content = content.decode()
if content[0] != '{':
DEBUG_MSG(content)
return
except:
DEBUG_MSG("dropRoom{} AgentRoom return Failed, userID = {}. return {} back".format(room_id, user_id, (card_cost, diamond_cost)))
utility.update_card_diamond(self.agent.accountName, card_cost, diamond_cost,
                    Functor(callback, self.roomID, self.agent.userId), "FourDeckCards drop AgentRoomID:{}".format(self.roomID)) # the reason must be in English
except:
pass
self._reset()
def do_drop_room(self):
if self.game_result:
if len(self.game_result['round_result']) == 0:
self.dropRoom()
else:
self.subtotal_result()
else:
self.dropRoom()
def broadcastOperation2(self, idx, aid, tile_list = None, curround_score = 0):
""" 将操作广播除了自己之外的其他人 """
for i, p in enumerate(self.players_list):
if p and i != idx:
p.mb.postOperation(idx, aid, tile_list, curround_score)
def broadcastMultiOperation(self, idx_list, aid_list, tile_list=None):
for i, p in enumerate(self.players_list):
if p is not None:
p.mb.postMultiOperation(idx_list, aid_list, tile_list)
def broadcastRoundEnd(self, info):
        # Broadcast the round-end info caused by a win or a drawn game, including the computed lucky tiles and this round's statistics
        # Record the players' results for this round first; total scores accumulate
self.record_round_result()
self.state = const.ROOM_WAITING
DEBUG_MSG("room:{0},curround:{1} broadcastRoundEnd state:{2}".format(self.roomID, self.current_round, self.state))
# info['left_tiles'] = self.tiles
info['player_info_list'] = [p.get_round_client_dict() for p in self.players_list if p is not None]
DEBUG_MSG("room:{0},curround:{1}=={2}".format(self.roomID, self.current_round, "&" * 30))
DEBUG_MSG("room:{0},curround:{1} RoundEnd info:{2}".format(self.roomID, self.current_round, info))
self.confirm_next_idx = []
for p in self.players_list:
if p:
p.mb.roundResult(info)
# self.end_record_game(info)
def pay2StartGame(self):
""" 开始游戏 """
DEBUG_MSG("room:{},curround:{},game_mode:{},base_score:{},king_mode:{},begin_dealer_mul:{},win_mode:{},three_job:{},pong_useful:{},bao_tou:{},round_max_lose:{},game_max_lose:{},game_round:{},hand_prepare:{} pay2StartGame state:{}".format(self.roomID, self.current_round,
self.game_mode, self.base_score, self.king_mode, self.begin_dealer_mul, self.win_mode, self.three_job,
self.pong_useful, self.bao_tou, self.round_max_lose, self.game_max_lose, self.game_round, self.hand_prepare,
self.state))
if self.timeout_timer:
self.cancel_timer(self.timeout_timer)
self.timeout_timer = None
self.state = const.ROOM_TRANSITION
if self.current_round == 0:
            # Back up the original seating in the first round
self.origin_players_list = self.players_list[:]
self.dealer_idx = 0
        # Deduct room cards only in round 1, otherwise every round would be charged
if self.current_round == 0:
if switch.DEBUG_BASE:
self.paySuccessCbk()
return
card_cost, diamond_cost = switch.calc_cost(self.game_round, self.getCalCostNeed())
if self.pay_mode == const.NORMAL_PAY_MODE:
pay_account = self.origin_players_list[0].mb.accountName
reason = "FourDeckCards RoomID:{}".format(self.roomID)
def pay_callback(content):
if self._check_pay_callback(content):
self.paySuccessCbk()
utility.update_card_diamond(pay_account, -card_cost, -diamond_cost, pay_callback, reason)
elif self.pay_mode == const.CLUB_PAY_MODE:
pay_account = self.club.owner['accountName']
reason = "FourDeckCards Club:{} RoomID:{}".format(self.club.clubId, self.roomID)
def pay_callback(content):
if self._check_pay_callback(content):
self.paySuccessCbk()
utility.update_card_diamond(pay_account, -card_cost, -diamond_cost, pay_callback, reason)
elif self.pay_mode == const.AGENT_PAY_MODE:
            # Room cards were already deducted when the room was opened
self.paySuccessCbk()
elif self.pay_mode == const.AA_PAY_MODE:
pay_accounts = [p.mb.accountName for p in self.players_list]
if self.club:
reason = "FourDeckCards Club:{} AA RoomID:{}".format(self.club.clubId, self.roomID)
else:
reason = "FourDeckCards AA RoomID:{}".format(self.roomID)
def pay_callback(content):
if self._check_aa_pay_callback(content):
self.paySuccessCbk()
utility.update_card_diamond_aa(pay_accounts, -card_cost, -diamond_cost, pay_callback, reason)
else:
ERROR_MSG("pay2StartGame Error: No this PayMode:{}".format(self.pay_mode))
return
else:
self.paySuccessCbk()
def _check_pay_callback(self, content):
if content is None or content[0] != '{':
DEBUG_MSG('{} pay callback {}'.format(self.prefixLogStr, content))
self.give_up_record_game()
# self.dropRoom()
self.do_drop_room()
return False
return True
def _check_aa_pay_callback(self, content):
res = True
try:
ret = json.loads(content)
if ret['errcode'] != 0:
res = False
DEBUG_MSG('room:{},cur_round:{} aa pay callback error code={}, msg={}'.format(self.roomID, self.current_round, ret['errcode'], ret['errmsg']))
except:
res = False
import traceback
ERROR_MSG(traceback.format_exc())
if not res:
self.give_up_record_game()
self.do_drop_room()
return False
return True
    # Start the game after room cards/diamonds were deducted successfully (unchanged part)
def paySuccessCbk(self):
DEBUG_MSG("room:{},curround:{} paySuccessCbk state:{}".format(self.roomID, self.current_round, self.state))
try:
            # In the first round the room owner deals by default; afterwards whoever won the last round deals; on a drawn game the previous dealer stays
swap_list = [0,1,2,3]
# self.swapSeat(swap_list)
self.op_record = []
# self.op_special_record = []
self.state = const.ROOM_PLAYING
self.current_round += 1
self.all_discard_tiles = []
for p in self.players_list:
p.reset()
self.current_idx = self.dealer_idx
self.discard_king_idx = -1
def begin(prefabHandTiles=None):
                # self.setPrevailingWind() # prevailing wind
                # self.setPlayerWind() # seat winds
                self.initTiles() # tile wall
                self.deal(prefabHandTiles) # deal tiles
                # self.kongWreath() # kong of flower tiles
                # self.addWreath() # replace flower tiles
                # self.rollKingTile(prefabKingTiles) # king (joker) tiles
                beginTilesList = [copy.deepcopy(p.tiles) for i, p in enumerate(self.players_list)]
                self.tidy() # tidy hands
                self.beginRound(True) # draw the first tile, then start the game
# beginTilesList[self.current_idx].append(self.players_list[self.current_idx].last_draw)
self.startGame(beginTilesList, swap_list)
if switch.DEBUG_BASE == 0:
begin([], [[] for i in range(self.player_num)], [])
            elif switch.DEBUG_BASE == 1: # dev mode: skip unnecessary communication time, closer to the real environment
prefabHandTiles = [
[],
[],
[],
[]
]
begin(prefabHandTiles)
else:
def callback(content):
DEBUG_MSG("room:{},curround:{} debugmode,content:{}".format(self.roomID, self.current_round,content))
if content is None or content == "10000" or content[0:2] != "ok": # 10000代表找不到该文件
begin()
else:
try:
content = content[2:]
data = json.loads(content)
DEBUG_MSG("room:{},curround:{} data:{}".format(self.roomID, self.current_round, data))
handTiles = [[] for i in range(self.player_num)]
                            # validate the data
for k,v in enumerate(data["handTiles"]):
if k < self.player_num:
for t in v:
if utility.validTile(t):
handTiles[k].append(t)
begin(handTiles)
except:
err, msg, stack = sys.exc_info()
DEBUG_MSG("room:{},curround:{} try begin error; exc_info: {} ,{}".format(self.roomID, self.current_round, err, msg))
utility.getDebugPrefab(self.origin_players_list[0].mb.accountName, callback)
except:
err, msg, stack = sys.exc_info()
DEBUG_MSG("room:{},curround:{} paySuccessCbk error; exc_info: {} ,{}".format(self.roomID, self.current_round, err, msg))
DEBUG_MSG("room:{},curround:{} consume failed! users: {}".format(self.roomID, self.current_round, [p.userId for p in self.origin_players_list if p]))
    # Players start the game
def startGame(self, beginTilesList, swap_list):
self.wait_force_delay_kong_draw = False
DEBUG_MSG("room:{},curround:{} start game swap_list:{}".format(self.roomID, self.current_round, swap_list))
diceList = self.throwDice([self.dealer_idx])
DEBUG_MSG("room:{},curround:{} start game info:{}".format(self.roomID, self.current_round, self.dealer_idx))
for i,p in enumerate(self.players_list):
if p and p.mb:
DEBUG_MSG("room:{},curround:{} start tiles:{}".format(self.roomID, self.current_round, p.tiles))
for i,p in enumerate(self.players_list):
if p and p.mb:
DEBUG_MSG("room:{},curround:{} start idx:{} begin tiles:{}".format(self.roomID, self.current_round, i, beginTilesList[i]))
p.mb.startGame(self.dealer_idx, beginTilesList[i], swap_list)
self.begin_record_game(diceList)
def cutAfterKong(self):
if len(self.tiles) <= self.lucky_num + const.END_TILE_NUMBER:
self.drawEnd()
elif len(self.tiles) > self.lucky_num + const.END_TILE_NUMBER + 1:
player = self.players_list[self.current_idx]
ti = self.tiles[0]
self.tiles = self.tiles[1:]
player.cutTile(ti)
def beginRound(self, is_first = False):
pass
# if len(self.tiles) <= self.lucky_num + const.END_TILE_NUMBER:
# self.drawEnd()
# return
# ti = self.tiles[0]
# self.tiles = self.tiles[1:]
# DEBUG_MSG("room:{0},curround:{1} idx:{2} beginRound tile:{3} leftNum:{4}".format(self.roomID, self.current_round, self.current_idx, ti, len(self.tiles)))
# p = self.players_list[self.current_idx]
# p.drawTile(ti, is_first)
def drawEnd(self):
DEBUG_MSG("room:{0},curround:{1} drawEnd.".format(self.roomID, self.current_round))
""" 臭庄 """
lucky_tiles = ""
# self.cal_lucky_tile_score(lucky_tiles, -1)
self.settlement()
info = dict()
info['win_op'] = -1
info['win_idx'] = -1
info['lucky_tiles'] = lucky_tiles
info['result_list'] = []
info['finalTile'] = 0
info['from_idx'] = -1
info['multiply'] = 0
info['dealer_idx'] = self.dealer_idx
info['cur_dealer_mul'] = self.cur_dealer_mul
info['job_relation'] = []
DEBUG_MSG("room:{0},curround:{1} drawEnd INFO:{2}".format(self.roomID, self.current_round, info))
        if self.current_round < self.game_round: # in match mode a drawn game always continues
self.broadcastRoundEnd(info)
else:
self.endAll(info)
def winGame(self, idx, op, finalTile, from_idx, score, result):
""" 座位号为idx的玩家胡牌 """
# self.deskPokerList[idx] = []
self.win_list.append(idx)
if len(self.win_list) == self.player_num - 1:
self.lastScoreList = copy.deepcopy(self.curScoreList)
surScore = 0
for i,score in enumerate(self.lastScoreList):
if score % 10 == 5:
self.lastScoreList[i] -= 5
surScore += 5
self.lastScoreList[i] -= 100
self.lastScoreList[self.win_list[0]] += surScore
self.cal_score(idx, self.win_list, op)
self.broadcastOperation(idx, op, [], self.curround_score, self.curScoreList)
DEBUG_MSG("room:{0},curround:{1} score0:{2} score1:{3} score2:{4} score3:{5}".format(self.roomID, self.current_round, self.players_list[0].score, self.players_list[1].score, self.players_list[2].score, self.players_list[3].score))
if len(self.win_list) != self.player_num - 1:
return
self.settlement()
prizeScoreList = [0] * self.player_num
for i in range(len(self.curPrizeList)):
prize_score = 0
for j in range(len(self.curPrizeList)):
if i == j:
prize_score += self.curPrizeList[j] * 90
else:
prize_score -= self.curPrizeList[j] * 30
prizeScoreList[i] = prize_score
info = dict()
DEBUG_MSG("room:{0},curround:{1} roundEnd win_list:{2} curScoreList:{3} curPrizeList:{4}".format(self.roomID, self.current_round, self.win_list, self.curScoreList, self.curPrizeList))
info['win_list'] = self.win_list
info['curScoreList'] = self.curScoreList
info['lastScoreList'] = self.lastScoreList
info['prizeScoreList'] = prizeScoreList
info['dealer_idx'] = last_dealer_idx = self.dealer_idx
self.dealer_idx = self.win_list[0]
self.round_start = True
self.current_idx = self.win_list[0]
self.waitIdx = self.win_list[0]
self.op_pass_flag = 0
self.controller_discard_list = []
self.deskPokerList = [[],[],[],[]]
self.curround_score = 0
self.round_end_list = []
self.win_list = []
self.curScoreList = [0] * self.player_num
self.lastScoreList = [0] * self.player_num
self.curPrizeList = [0] * self.player_num
if self.current_round < self.game_round:
self.broadcastRoundEnd(info)
else:
self.endAll(info)
def begin_record_game(self, diceList):
DEBUG_MSG("room:{0},curround:{1} begin record game".format(self.roomID, self.current_round))
self.begin_record_room()
KBEngine.globalData['GameWorld'].begin_record_room(self, self.roomID, self, diceList)
def begin_record_callback(self, record_id):
self.record_id = record_id
def end_record_game(self, result_info):
DEBUG_MSG("room:{0},curround:{1} end record game".format(self.roomID, self.current_round))
KBEngine.globalData['GameWorld'].end_record_room(self.roomID, self, result_info)
self.record_id = -1
def give_up_record_game(self):
DEBUG_MSG("room:{0},curround:{1} give up record game".format(self.roomID, self.current_round))
KBEngine.globalData['GameWorld'].give_up_record_room(self.roomID)
def settlement(self):
for i,p in enumerate(self.players_list):
if p is not None:
p.settlement()
def endAll(self, info):
""" 游戏局数结束, 给所有玩家显示最终分数记录 """
        # Record the players' results for this round first; total scores accumulate
self.record_round_result()
# info['left_tiles'] = self.tiles
info['player_info_list'] = [p.get_round_client_dict() for p in self.players_list if p is not None]
player_info_list = [p.get_final_client_dict() for p in self.players_list if p is not None]
DEBUG_MSG("room:{0},curround:{1} endAll player_info_list = {2} info = {3}".format(self.roomID, self.current_round, player_info_list, info))
for p in self.players_list:
if p and p.mb:
p.mb.finalResult(player_info_list, info)
if self.room_type == const.CLUB_ROOM:
                    # increment the effective circle count
p.mb.addGameCount()
# self.end_record_game(info)
self.saveRoomResult()
self._reset()
def subtotal_result(self):
self.dismiss_timer = None
player_info_list = [p.get_final_client_dict() for p in self.players_list if p is not None]
DEBUG_MSG("room:{0},curround:{1} subtotal_result,player_info_list:{2}".format(self.roomID, self.current_round, player_info_list))
for p in self.players_list:
if p and p.mb:
try:
p.mb.subtotalResult(player_info_list)
except:
pass
self._reset()
def doOperation(self, avt_mb, aid, tile_list):
idx = -1
for i, p in enumerate(self.players_list):
if p and p.mb == avt_mb:
idx = i
# tile = tile_list[0]
DEBUG_MSG("room:{0},curround:{1} idx:{2} doOperation current_idx:{3} aid:{4} tile_list:{5}".format(self.roomID, self.current_round, idx, self.current_idx, aid, tile_list))
"""
        Operation confirmed to the server by the player currently in control after drawing
"""
if self.dismiss_room_ts != 0 and int(time.time() - self.dismiss_room_ts) < const.DISMISS_ROOM_WAIT_TIME:
            # A dismiss vote is in progress, so no other operation is allowed
DEBUG_MSG("room:{0},curround:{1} idx:{2} doOperationFailed dismiss_room_ts:{3}".format(self.roomID, self.current_round, idx, self.dismiss_room_ts))
avt_mb.doOperationFailed(const.OP_ERROR_VOTE)
return
if self.state != const.ROOM_PLAYING:
DEBUG_MSG("room:{0},curround:{1} idx:{2} doOperationFailed state:{3}".format(self.roomID, self.current_round, idx, self.state))
avt_mb.doOperationFailed(const.OP_ERROR_STATE)
return
# DEBUG_MSG("doOperation idx:{0},self.current_idx:{1},self.wait_op_info_list:{2}".format(idx, self.current_idx, self.wait_op_info_list))
if len(tile_list) == 0 and aid == const.OP_DISCARD:
avt_mb.doOperationFailed(const.OP_ERROR_ILLEGAL)
return
if idx != self.current_idx:
avt_mb.doOperationFailed(const.OP_ERROR_NOT_CURRENT)
return
p = self.players_list[idx]
# if aid == const.OP_DISCARD and self.can_discard(idx, tile):
# self.all_discard_tiles.append(tile)
# p.discardTile(tile)
# elif aid == const.OP_CONCEALED_KONG and self.can_concealed_kong(idx, tile):
# p.concealedKong(tile)
# elif aid == const.OP_KONG_WREATH and self.can_kong_wreath(p.tiles, tile):
# p.kongWreath(tile)
# elif aid == const.OP_CONTINUE_KONG and self.can_continue_kong(idx, tile):
# p.continueKong(tile)
# elif aid == const.OP_PASS:
        #     # When a player could kong or win on his own draw but chooses pass, do nothing; it remains his turn to discard.
# pass
        # elif aid == const.OP_DRAW_WIN: # ordinary self-drawn win
# is_win, score, result = self.can_win(list(p.tiles), p.last_draw, const.OP_DRAW_WIN, idx)
# DEBUG_MSG("room:{0},curround:{1} idx:{2} do OP_DRAW_WIN==>{3}, {4}, {5}".format(self.roomID, self.current_round, idx, is_win, score, result))
# if is_win:
# p.draw_win(tile, score, result)
# else:
# avt_mb.doOperationFailed(const.OP_ERROR_ILLEGAL)
# self.current_idx = self.nextIdx
# self.beginRound()
        # elif aid == const.OP_WREATH_WIN: # self-drawn eight-flower win
# is_win, score, result = self.can_win(list(p.tiles), p.last_draw, const.OP_WREATH_WIN, idx)
# DEBUG_MSG("room:{0},curround:{1} idx:{2} do OP_WREATH_WIN==>{3}, {4}, {5}".format(self.roomID, self.current_round, idx, is_win, score, result))
# if is_win:
# p.draw_win(tile, score, result)
# else:
# avt_mb.doOperationFailed(const.OP_ERROR_ILLEGAL)
# self.current_idx = self.nextIdx
# self.beginRound()
# else:
# avt_mb.doOperationFailed(const.OP_ERROR_ILLEGAL)
# self.current_idx = self.nextIdx
# self.beginRound()
self.deskPokerList[idx] = tile_list
curtile_list = utility.rightShiftCards(tile_list)
last_list = utility.rightShiftCards(self.controller_discard_list)
for i in range(self.player_num):
self.current_idx = self.nextIdx
if self.current_idx in self.win_list:
self.deskPokerList[self.current_idx] = []
continue
else:
break
DEBUG_MSG("room:{0},curround:{1} idx:{2} curtile_list:{3} last_list:{4} curPrizeList{5}".format(self.roomID, self.current_round, idx, curtile_list, last_list, self.curPrizeList))
if aid == const.OP_DISCARD and utility.compareTile(curtile_list, last_list):
self.curround_score += utility.getDiscardScore(curtile_list)
bombScore = utility.getBombScore(curtile_list)
if bombScore > 0:
self.cal_score(idx, self.win_list, const.OP_DISCARD, bombScore)
self.curPrizeList[idx] += int(bombScore / 30)
if len(self.round_end_list) != len(self.win_list):
end_idx = self.win_list[len(self.win_list) - 1]
self.round_end_list.append(end_idx)
p.discardTile(tile_list)
elif aid == const.OP_PASS:
            # The client determined it cannot beat the previous player's cards
self.op_pass_flag += 1
if self.op_pass_flag == self.player_num - len(self.round_end_list) - 1:
                # Everyone else passed, so it is this player's turn to lead again
self.controller_discard_list = []
self.round_start = True
if len(self.round_end_list) == len(self.win_list):
self.players_list[self.current_idx].add_score(self.curround_score)
self.curScoreList[self.current_idx] += self.curround_score
else:
end_idx = self.win_list[len(self.win_list) - 1]
self.curScoreList[end_idx] += self.curround_score
self.players_list[end_idx].add_score(self.curround_score)
self.round_end_list.append(end_idx)
self.curround_score = 0
# elif self.op_pass_flag == self.player_num - len(self.win_list) - 1:
# end_idx = self.win_list[len(self.win_list) - 1]
# self.round_end_list.append(end_idx)
# self.controller_discard_list = []
# self.round_start = True
# self.curScoreList[self.current_idx] += self.curround_score
# self.players_list[self.current_idx].add_score(self.curround_score)
# self.curround_score = 0
DEBUG_MSG("room:{0},curround:{1} idx:{2}OP_PASS curround_score:{3}".format(self.roomID, self.current_round, idx, self.curround_score))
def delay_callback():
self.broadcastOperation(idx, aid, [], self.curround_score, self.curScoreList)
self.add_timer(const.DELAY_OP_PASS, delay_callback)
else:
DEBUG_MSG("room:{0},curround:{1} idx:{2} doOperationFailed".format(self.roomID, self.current_round, idx))
avt_mb.doOperationFailed(const.OP_ERROR_ILLEGAL)
return
if len(self.win_list) < self.player_num - 1:
DEBUG_MSG("room:{0},curround:{1} idx:{2} curround_score:{3}".format(self.roomID, self.current_round, idx, self.curround_score))
def delay_callback():
self.waitForOperation(self.current_idx, const.OP_DISCARD, 0, self.round_start)
if aid == const.OP_PASS:
self.add_timer(const.DELAY_OP_PASS, delay_callback)
else:
self.waitForOperation(self.current_idx, const.OP_DISCARD, 0, self.round_start)
def broadcastOperation(self, idx, aid, tile_list = None, curround_score = 0, curScoreList = []):
"""
        Broadcast the operation to everyone, including the acting player
        :param idx: seat index of the acting player
        :param aid: operation id
        :param tile_list: list of discarded cards
"""
for i, p in enumerate(self.players_list):
if p is not None:
p.mb.postOperation(idx, aid, tile_list, curround_score, curScoreList)
def confirmOperation(self, avt_mb, aid, tile_list):
tile = tile_list[0]
idx = -1
for i, p in enumerate(self.players_list):
if p and p.mb == avt_mb:
idx = i
DEBUG_MSG("room:{0},curround:{1} idx:{2} confirmOperation aid:{3} tile_list:{4}".format(self.roomID, self.current_round, idx, aid, tile_list))
""" 被轮询的玩家确认了某个操作 """
if self.dismiss_room_ts != 0 and int(time.time() - self.dismiss_room_ts) < const.DISMISS_ROOM_WAIT_TIME:
            # A dismiss vote is in progress, so no other operation is allowed
return
        # check whether this player may act
DEBUG_MSG("room:{0},curround:{1} idx:{2} wait_op_info_list:{3}".format(self.roomID, self.current_round, idx, self.wait_op_info_list))
if len(self.wait_op_info_list) <= 0 or sum([1 for waitOpDict in self.wait_op_info_list if (waitOpDict["idx"] == idx and waitOpDict["state"] == const.OP_STATE_WAIT)]) <= 0:
avt_mb.doOperationFailed(const.OP_ERROR_NOT_CURRENT)
return
        # commit this player's result
for waitOpDict in self.wait_op_info_list:
if waitOpDict["idx"] == idx:
if waitOpDict["aid"] == const.OP_CHOW and aid == const.OP_CHOW and waitOpDict["tileList"][0] == tile_list[0] and self.can_chow_list(waitOpDict["idx"], tile_list):
waitOpDict["state"] = const.OP_STATE_SURE
waitOpDict["tileList"] = tile_list
elif waitOpDict["aid"] == aid and aid != const.OP_CHOW:
waitOpDict["state"] = const.OP_STATE_SURE
else:
waitOpDict["state"] = const.OP_STATE_PASS
        # some player can act
isOver,confirmOpDict = self.getConfirmOverInfo()
if isOver:
DEBUG_MSG("room:{0},curround:{1} commit over {2}.".format(self.roomID, self.current_round, confirmOpDict))
temp_wait_op_info_list = copy.deepcopy(self.wait_op_info_list)
self.wait_op_info_list = []
if len(confirmOpDict) > 0:
sureIdx = confirmOpDict["idx"]
p = self.players_list[sureIdx]
if confirmOpDict["aid"] == const.OP_CHOW:
self.current_idx = sureIdx
p.chow(confirmOpDict["tileList"])
elif confirmOpDict["aid"] == const.OP_PONG:
self.current_idx = sureIdx
p.pong(confirmOpDict["tileList"][0])
elif confirmOpDict["aid"] == const.OP_EXPOSED_KONG:
self.current_idx = sureIdx
p.exposedKong(confirmOpDict["tileList"][0])
elif confirmOpDict["aid"] == const.OP_KONG_WIN:
p.kong_win(confirmOpDict["tileList"][0], confirmOpDict["score"], confirmOpDict["result"])
elif confirmOpDict["aid"] == const.OP_GIVE_WIN:
p.give_win(confirmOpDict["tileList"][0], confirmOpDict["score"], confirmOpDict["result"])
else:
lastAid = temp_wait_op_info_list[0]["aid"]
if lastAid == const.OP_WREATH_WIN:
self.current_idx = self.last_player_idx
elif lastAid == const.OP_KONG_WIN:
                        #********* nobody robbed the kong; should the kong be scored? *********
self.current_idx = self.last_player_idx
if self.can_cut_after_kong():
self.cutAfterKong()
else:
self.current_idx = self.nextIdx
self.beginRound()
else:
lastAid = temp_wait_op_info_list[0]["aid"]
if lastAid == const.OP_WREATH_WIN:
self.current_idx = self.last_player_idx
elif lastAid == const.OP_KONG_WIN:
                #********* nobody robbed the kong; should the kong be scored? *********
self.current_idx = self.last_player_idx
else:
self.current_idx = self.nextIdx
self.beginRound()
def getConfirmOverInfo(self):
for i in range(len(self.wait_op_info_list)):
waitState = self.wait_op_info_list[i]["state"]
if waitState == const.OP_STATE_PASS:
continue
            elif waitState == const.OP_STATE_WAIT: # must wait for other players to act
return False, {}
            elif waitState == const.OP_STATE_SURE: # a player can act
return True, self.wait_op_info_list[i]
        return True, {} # all players chose to pass
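    # Resolution example (derived from the loop above): with wait states
    # [PASS, WAIT, SURE] the WAIT entry blocks resolution and (False, {}) is
    # returned; once it turns into PASS, the first SURE entry wins and its op
    # dict is returned; if every entry is PASS the poll ends with (True, {}).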
    def waitForOperation(self, idx, aid, tile, round_start = False): # for rob-kong / flower-kong aids that nobody can win on, nextIdx stays with this player
# notifyOpList = self.getNotifyOpList(idx, aid, tile)
# if sum([len(x) for x in notifyOpList]) > 0:
# DEBUG_MSG("room:{0},curround:{1} waitForOperation from:{2},aid:{3},tile:{4}==>notifyOpList:{5}".format(self.roomID, self.current_round, idx, aid, tile, notifyOpList))
# for i,p in enumerate(self.players_list):
# if p is not None and len(notifyOpList[i]) > 0:
# waitAidList = [notifyOp["aid"] for notifyOp in notifyOpList[i]]
# p.mb.waitForOperation(waitAidList, [tile,])
# else:
# DEBUG_MSG("room:{0},curround:{1} nobody waitForOperation from:{2},aid:{3},tile:{4},nextIdx:{5}".format(self.roomID, self.current_round, idx, aid, tile, nextIdx))
# if self.can_cut_after_kong() and (aid >> 3) == const.SHOW_KONG:
# self.cutAfterKong()
# self.current_idx = self.nextIdx if nextIdx < 0 else nextIdx
# self.beginRound()
DEBUG_MSG("room:{0},curround:{1} waitForOperation idx:{2},aid:{3},round_start:{4}".format(self.roomID, self.current_round, idx, aid, round_start))
self.waitIdx = self.current_idx
for i,p in enumerate(self.players_list):
if p is not None:
p.mb.waitForOperation(idx, aid, round_start)
if len(self.getTipsCards()) == 0 and self.players_list[self.current_idx].online and not round_start:
self.players_list[self.current_idx].mb.doOperation(const.OP_PASS, [])
def get_init_client_dict(self):
return {
'roomID' : self.roomID,
'ownerId' : self.owner_uid,
'roomType' : self.room_type,
'dealerIdx' : self.dealer_idx,
'curRound' : self.current_round,
'maxRound' : self.game_round,
'player_num' : self.player_num,
# 'king_num' : self.king_num,
'pay_mode' : self.pay_mode,
# 'game_mode' : self.game_mode,
# 'game_max_lose' : self.game_max_lose,
# 'round_max_lose' : self.round_max_lose,
# 'lucky_num' : self.lucky_num,
'hand_prepare' : self.hand_prepare,
# 'base_score' : self.base_score,
# 'king_mode' : self.king_mode,
# 'begin_dealer_mul' : self.begin_dealer_mul,
# 'cur_dealer_mul' : self.cur_dealer_mul,
# 'win_mode' : self.win_mode,
# 'three_job' : self.three_job,
# 'pong_useful' : self.pong_useful,
# 'bao_tou' : self.bao_tou,
'club_id' : self.club.clubId if self.club is not None else 0,
'player_base_info_list': [p.get_init_client_dict() for p in self.players_list if p is not None],
'player_state_list': [1 if i in self.confirm_next_idx else 0 for i in range(const.ROOM_PLAYER_NUMBER)],
}
def get_agent_client_dict(self):
return {
'roomID' : self.roomID,
'curRound' : self.current_round,
# 'cur_dealer_mul' : self.cur_dealer_mul,
'maxRound' : self.game_round,
# 'king_num' : self.king_num,
'pay_mode' : self.pay_mode,
# 'game_mode' : self.game_mode,
# 'game_max_lose' : self.game_max_lose,
# 'round_max_lose' : self.round_max_lose,
'player_num' : self.player_num,
# 'base_score' : self.base_score,
# 'king_mode' : self.king_mode,
# 'win_mode' : self.win_mode,
# 'three_job' : self.three_job,
# 'pong_useful' : self.pong_useful,
# 'bao_tou' : self.bao_tou,
# 'begin_dealer_mul' : self.begin_dealer_mul,
# 'lucky_num' : self.lucky_num,
'hand_prepare' : self.hand_prepare,
'player_simple_info_list': [p.get_simple_client_dict() for p in self.players_list if p is not None]
}
def get_agent_complete_dict(self):
return {
'roomID' : self.roomID,
'maxRound' : self.game_round,
# 'king_num' : self.king_num,
# 'game_mode' : self.game_mode,
# 'game_max_lose' : self.game_max_lose,
# 'round_max_lose' : self.round_max_lose,
'player_num' : self.player_num,
'pay_mode' : self.pay_mode,
# 'base_score' : self.base_score,
# 'king_mode' : self.king_mode,
# 'win_mode' : self.win_mode,
# 'three_job' : self.three_job,
# 'pong_useful' : self.pong_useful,
# 'bao_tou' : self.bao_tou,
# 'begin_dealer_mul' : self.begin_dealer_mul,
# 'lucky_num' : self.lucky_num,
'hand_prepare' : self.hand_prepare,
'time' : utility.get_cur_timestamp(),
'player_simple_info_list': [p.get_simple_client_dict() for p in self.origin_players_list if p is not None],
}
def get_club_complete_dict(self):
return {
'roomID' : self.roomID,
'time' : utility.get_cur_timestamp(),
'player_info_list': [p.get_club_client_dict() for p in self.origin_players_list if p is not None],
}
def get_reconnect_room_dict(self, userId):
dismiss_left_time =const.DISMISS_ROOM_WAIT_TIME - (int(time.time() - self.dismiss_room_ts))
if self.dismiss_room_ts == 0 or dismiss_left_time >= const.DISMISS_ROOM_WAIT_TIME:
dismiss_left_time = 0
idx = 0
for p in self.players_list:
if p and p.userId == userId:
idx = p.idx
waitAidList = []
for i in range(len(self.wait_op_info_list)):
if self.wait_op_info_list[i]["idx"] == idx and self.wait_op_info_list[i]["state"] == const.OP_STATE_WAIT:
waitAidList.append(self.wait_op_info_list[i]["aid"])
DEBUG_MSG('room:{},curround:{} reconnect_room waitAidList:{}'.format(self.roomID, self.current_round, waitAidList))
DEBUG_MSG("current_idx:{} controller_discard_list:{} deskPokerList:{} curround_score:{}".format(self.current_idx, self.controller_discard_list, self.deskPokerList, self.curround_score))
return {
'init_info' : self.get_init_client_dict(),
'controllerIdx' : self.current_idx,
'controller_discard_list' : self.controller_discard_list,
'curround_score' : self.curround_score,
'deskPokerList' : self.deskPokerList,
'waitIdx' : self.current_idx,
'room_state' : const.ROOM_PLAYING if self.state == const.ROOM_PLAYING else const.ROOM_WAITING,
'win_list' : self.win_list,
'curPrizeList' : self.curPrizeList,
'curScoreList' : self.curScoreList,
'player_state_list' : [1 if i in self.confirm_next_idx else 0 for i in range(self.player_num)],
'applyCloseFrom' : self.dismiss_room_from,
'applyCloseLeftTime' : dismiss_left_time,
'applyCloseStateList' : self.dismiss_room_state_list,
'player_advance_info_list' : [p.get_reconnect_client_dict(userId) for p in self.players_list if p is not None],
}
def broadcastEnterRoom(self, idx):
new_p = self.players_list[idx]
for i, p in enumerate(self.players_list):
if p is None:
continue
if i == idx:
p.mb.enterRoomSucceed(self, idx)
else:
p.mb.othersEnterRoom(new_p.get_init_client_dict())
def record_round_result(self):
        # Record each player's result for this round
d = datetime.fromtimestamp(time.time())
round_result_d = {
'date': '-'.join([str(d.year), str(d.month), str(d.day)]),
'time': ':'.join([str(d.hour), str(d.minute)]),
'round_record': [p.get_round_result_info() for p in self.players_list if p],
'recordId': self.record_id
}
self.game_result['round_result'].append(round_result_d)
def begin_record_room(self):
        # Record the basic info in the first round
if self.current_round != 1:
return
self.game_result = {
'maxRound': self.game_round,
# 'gameMaxLose': self.game_max_lose,
'roomID': self.roomID,
'user_info_list': [p.get_basic_user_info() for p in self.players_list if p]
}
self.game_result['round_result'] = []
def save_game_result(self):
DEBUG_MSG('room:{},curround:{} len:{} {}'.format(self.roomID, self.current_round, len(self.game_result.get('round_result', [])), "-save-" * 10))
if len(self.game_result['round_result']) > 0:
result_str = json.dumps(self.game_result)
for p in self.players_list:
p and p.save_game_result(result_str)
def save_agent_complete_result(self):
DEBUG_MSG('room:{},curround:{} ------ save agent complete result -----'.format(self.roomID, self.current_round))
d = self.get_agent_complete_dict()
result_str = json.dumps(d)
if self.agent:
if self.agent.isDestroyed:
import x42
for k, v in x42.GW.avatars.items():
if v.userId == self.agent.userId:
v.saveAgentRoomResult(result_str)
break
else:
ERROR_MSG("room:{},curround:{} Save AgentRoom result failed!!! agent.userId = {}".format(self.roomID, self.current_round, self.agent.userId))
else:
self.agent.saveAgentRoomResult(result_str)
def save_club_result(self):
DEBUG_MSG('room:{},curround:{} ------ save club result -----'.format(self.roomID, self.current_round))
d = self.get_club_complete_dict()
if self.club:
self.club.saveTableResult(d)
def saveRoomResult(self):
        # Save the players' result records
self.save_game_result()
        # Save the agent-opened room record
if self.room_type == const.AGENT_ROOM and self.agent:
            # Agent room completion record
self.save_agent_complete_result()
            # Remove the room from the agent's rooms
self.agent.agentRoomDropped(self.roomID)
        # Save the club results
if self.room_type == const.CLUB_ROOM:
self.save_club_result()
def timeoutDestroy(self):
INFO_MSG("room:{},curround:{} timeout destroyed. room_type = {}, owner_uid = {}".format(self.roomID, self.current_round, self.room_type, self.owner_uid))
if self.current_round < 1:
self.do_drop_room()
def destroySelf(self):
self.clear_timers()
not self.isDestroyed and self.destroy()
def destroyByServer(self, reason=None):
        # Called by GameWorld when the server shuts down
self.dismiss_timer = None
for p in self.players_list:
if p and p.mb:
try:
p.mb.quitRoomSucceed()
if reason:
p.mb.showTip(reason)
except:
pass
self.destroySelf()
def getSeatAbstractInfo(self):
seat = 0
for i in range(const.ROOM_PLAYER_NUMBER):
p = self.players_list[i]
if p:
seat |= 2 ** i
return seat
def getSeatDetailInfo(self):
detail = []
for p in self.players_list:
if p:
detail.append(p.get_simple_client_dict())
return detail
def getCalCostNeed(self):
return {
'pay_mode' : self.pay_mode,
} | [
"[email protected]"
] | |
31556e2ff279b0f2bc83581d282addea9f319f6a | a2dc75a80398dee58c49fa00759ac99cfefeea36 | /bluebottle/activities/migrations/0043_auto_20210420_0847.py | 1832ca60d59f7222957c239e0285e3bbb6f24d90 | [
"BSD-2-Clause"
] | permissive | onepercentclub/bluebottle | e38b0df2218772adf9febb8c6e25a2937889acc0 | 2b5f3562584137c8c9f5392265db1ab8ee8acf75 | refs/heads/master | 2023-08-29T14:01:50.565314 | 2023-08-24T11:18:58 | 2023-08-24T11:18:58 | 13,149,527 | 15 | 9 | BSD-3-Clause | 2023-09-13T10:46:20 | 2013-09-27T12:09:13 | Python | UTF-8 | Python | false | false | 1,330 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-04-20 06:47
from __future__ import unicode_literals
from django.db import migrations, connection
def create_activity_view(apps, schema_editor):
sql = """
DROP VIEW IF EXISTS activities;
CREATE VIEW activities AS
SELECT ct.model::text AS activity_type,
ac.title,
ac.id,
ac.status,
ac.created,
ac.updated
FROM {0}.activities_activity ac
LEFT JOIN {0}.time_based_dateactivity da ON da.timebasedactivity_ptr_id = ac.id
LEFT JOIN {0}.time_based_periodactivity pa ON pa.timebasedactivity_ptr_id = ac.id
LEFT JOIN {0}.funding_funding fu ON fu.activity_ptr_id = ac.id
LEFT JOIN {0}.deeds_deed de ON de.activity_ptr_id = ac.id
JOIN {0}.django_content_type ct ON ac.polymorphic_ctype_id = ct.id;
""".format(connection.tenant.schema_name)
if connection.tenant.schema_name != 'public':
schema_editor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('activities', '0042_effortcontribution_contribution_type'),
('deeds', '0007_auto_20210222_1644')
]
operations = [
migrations.RunPython(create_activity_view, migrations.RunPython.noop)
]
| [
"[email protected]"
] | |
988702a78c19d40f847900e6fd1f3b46d60d54af | 86ed811106eecf7aa3a15cf98537ef274b811ad7 | /headmasters/migrations/0009_headmasterprofile_crop_url.py | 1cdccb88138baab74eb6d4ab65b1dfaaa7729e12 | [] | no_license | SaifulAbir/Django-MIS | 934ad39beff62f0e1cbe9377738b780122989662 | d680a0a64211bc9cd7748364454c52b16398ea5c | refs/heads/master | 2022-10-19T11:57:46.087577 | 2020-02-03T10:10:08 | 2020-02-03T10:10:08 | 271,542,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | # Generated by Django 2.2.4 on 2019-10-22 06:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('headmasters', '0008_auto_20190930_1246'),
]
operations = [
migrations.AddField(
model_name='headmasterprofile',
name='crop_url',
field=models.TextField(blank=True, default='', null=True),
),
]
| [
"[email protected]"
] | |
de894f519d533dd6183e61c9dd8f23315fa88388 | 652e6171022bb844102e191e9459e73ff2d7901b | /tests/optimizations/HardImports_2.py | 8b513f9b738753c890d6b6be85033361b2fdc0ce | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | pombredanne/Nuitka | e07ee1ba2c027c25e4feebc9751bbb0c1cb338b1 | 02e8d59a275cd7fe482cbc8100e753ff5abe39d7 | refs/heads/develop | 2022-03-16T23:55:49.295972 | 2022-02-20T14:28:23 | 2022-02-20T14:28:23 | 69,127,861 | 0 | 0 | null | 2016-09-24T21:10:20 | 2016-09-24T21:10:20 | null | UTF-8 | Python | false | false | 973 | py | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
def sysOptionalAttribute():
    # "sys.maxint" and "sys.subversion" exist only on Python 2, so the
    # optimizer must treat them as optional attributes of the hard import.
    return sys.maxint, sys.subversion
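# Illustrative note (not part of the original test): on Python 3 the same
# lookups fail at run time, e.g.:
#
#   >>> import sys
#   >>> sys.maxint                      # doctest: +SKIP
#   Traceback (most recent call last):
#   AttributeError: module 'sys' has no attribute 'maxint'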
| [
"[email protected]"
] | |
df189a233dc0b05d92ae76eda7e06be7f66882b2 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/41/usersdata/112/24387/submittedfiles/gravitacional.py | 888bc07241bd9b1b980d67c03c96c8be908f635c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import funcoes
#INPUT
dimensao = input('Enter the dimension of the matrices: ')
matrizA = raw_input('Enter Matrix A as a single line, in quotes: ')
matrizD = raw_input('Enter Matrix D as a single line, in quotes: ')
alfa = input('Enter the value of alfa: ')
#PREPARING THE INPUT
T = np.zeros((dimensao,dimensao))
A = np.fromstring(matrizA, sep=' ').reshape(dimensao, dimensao)
d = np.fromstring(matrizD, sep=' ').reshape(dimensao, dimensao)
#start here...
#BEGIN
#OUTPUT
somatorio = sum(sum(T))
print('%.4f' % somatorio)
| [
"[email protected]"
] | |
4351140b637b391a04bca70933beb6392331991c | a56252fda5c9e42eff04792c6e16e413ad51ba1a | /resources/usr/local/lib/python2.7/dist-packages/sklearn/grid_search.py | babf7a6b2e77b260bd4677d8489191de20e18243 | [
"Apache-2.0"
] | permissive | edawson/parliament2 | 4231e692565dbecf99d09148e75c00750e6797c4 | 2632aa3484ef64c9539c4885026b705b737f6d1e | refs/heads/master | 2021-06-21T23:13:29.482239 | 2020-12-07T21:10:08 | 2020-12-07T21:10:08 | 150,246,745 | 0 | 0 | Apache-2.0 | 2019-09-11T03:22:55 | 2018-09-25T10:21:03 | Python | UTF-8 | Python | false | false | 32,847 | py | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import numbers
import operator
import time
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed, logger
from .externals import six
from .utils import safe_mask, check_random_state
from .utils.validation import _num_samples, check_arrays
from .metrics.scorer import _deprecate_loss_and_score_funcs
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list
# XXX Why? The behavior when passing a list is undocumented,
# but not doing this breaks one of the tests.
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
class IterGrid(ParameterGrid):
"""Generators on the combination of the various parameter lists given.
This class is DEPRECATED. It was renamed to ``ParameterGrid``. The name
``IterGrid`` will be removed in 0.15.
Parameters
----------
param_grid : dict of string to sequence
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to one of its
allowed values.
Examples
--------
>>> from sklearn.grid_search import IterGrid
>>> param_grid = {'a':[1, 2], 'b':[True, False]}
>>> list(IterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
See also
--------
:class:`GridSearchCV`:
uses ``IterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
warnings.warn("IterGrid was renamed to ParameterGrid and will be"
" removed in 0.15.", DeprecationWarning)
super(IterGrid, self).__init__(param_grid)
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
as sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
rnd = check_random_state(self.random_state)
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, base_estimator, parameters, train, test, scorer,
verbose, loss_func=None, **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
base_estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on base_estimator clone for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
if verbose > 1:
start_time = time.time()
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[GridSearchCV] %s %s" % (msg, (64 - len(msg)) * '.'))
# update parameters of the classifier after a copy of its base structure
clf = clone(base_estimator)
clf.set_params(**parameters)
if hasattr(base_estimator, 'kernel') and callable(base_estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(base_estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_train = [X[idx] for idx in train]
X_test = [X[idx] for idx in test]
else:
if getattr(base_estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
X_train = X[np.ix_(train, train)]
X_test = X[np.ix_(test, train)]
else:
X_train = X[safe_mask(X, train)]
X_test = X[safe_mask(X, test)]
if y is not None:
y_test = y[safe_mask(y, test)]
y_train = y[safe_mask(y, train)]
clf.fit(X_train, y_train, **fit_params)
if scorer is not None:
this_score = scorer(clf, X_test, y_test)
else:
this_score = clf.score(X_test, y_test)
else:
clf.fit(X_train, **fit_params)
if scorer is not None:
this_score = scorer(clf, X_test)
else:
this_score = clf.score(X_test)
if not isinstance(this_score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s)"
" instead." % (str(this_score), type(this_score)))
if verbose > 2:
msg += ", score=%f" % this_score
if verbose > 1:
end_msg = "%s -%s" % (msg,
logger.short_format_time(time.time() -
start_time))
print("[GridSearchCV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
return this_score, parameters, _num_samples(X_test)
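# Illustrative sketch (not part of the original module): evaluating a single
# grid point by hand; the estimator, data and fold names below are
# assumptions for illustration only.
#
#   >>> from sklearn.svm import SVC
#   >>> from sklearn.cross_validation import KFold
#   >>> train, test = next(iter(KFold(len(y_data), n_folds=3)))
#   >>> score, params, n_test = fit_grid_point(
#   ...     X_data, y_data, SVC(), {'C': 1.0}, train, test,
#   ...     scorer=None, verbose=0)          # doctest: +SKIP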
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
            if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None, loss_func=None,
score_func=None, fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs'):
self.scoring = scoring
self.estimator = estimator
self.loss_func = loss_func
self.score_func = score_func
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self._check_estimator()
def score(self, X, y=None):
"""Returns the score on the given test data and labels, if the search
estimator has been refit. The ``score`` function of the best estimator
is used, or the ``scoring`` parameter where unavailable.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
if hasattr(self.best_estimator_, 'score'):
return self.best_estimator_.score(X, y)
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
return self.scorer_(self.best_estimator_, X, y)
@property
def predict(self):
return self.best_estimator_.predict
@property
def predict_proba(self):
return self.best_estimator_.predict_proba
@property
def decision_function(self):
return self.best_estimator_.decision_function
@property
def transform(self):
return self.best_estimator_.transform
def _check_estimator(self):
"""Check that estimator can be fitted and score can be computed."""
if (not hasattr(self.estimator, 'fit') or
not (hasattr(self.estimator, 'predict')
or hasattr(self.estimator, 'score'))):
raise TypeError("estimator should a be an estimator implementing"
" 'fit' and 'predict' or 'score' methods,"
" %s (type %s) was passed" %
(self.estimator, type(self.estimator)))
if (self.scoring is None and self.loss_func is None and self.score_func
is None):
if not hasattr(self.estimator, 'score'):
raise TypeError(
"If no scoring is specified, the estimator passed "
"should have a 'score' method. The estimator %s "
"does not." % self.estimator)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
n_samples = _num_samples(X)
X, y = check_arrays(X, y, allow_lists=True, sparse_format='csr')
self.scorer_ = _deprecate_loss_and_score_funcs(
self.loss_func, self.score_func, self.scoring)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
y = np.asarray(y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch)(
delayed(fit_grid_point)(
X, y, base_estimator, parameters, train, test,
self.scorer_, self.verbose, **self.fit_params)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of triplets: (score, parameters, n_test_samples)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, parameters, this_n_test_samples in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
    used to predict are optimized by cross-validation.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, optional
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=..., degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=..., verbose=False),
fit_params={}, iid=..., loss_func=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=..., score_func=...,
scoring=..., verbose=...)
Attributes
----------
`grid_scores_` : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
`best_estimator_` : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data.
`best_score_` : float
Score of best_estimator on the left out data.
`best_params_` : dict
Parameter setting that gave the best results on the hold out data.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
"""
def __init__(self, estimator, param_grid, scoring=None, loss_func=None,
score_func=None, fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs'):
super(GridSearchCV, self).__init__(
estimator, scoring, loss_func, score_func, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None, **params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
if params:
warnings.warn("Additional parameters to GridSearchCV are ignored!"
" The params argument will be removed in 0.15.",
DeprecationWarning)
return self._fit(X, y, ParameterGrid(self.param_grid))
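# Illustrative sketch (not part of the original module): inspecting a
# finished search; the estimator and data names below are assumptions.
#
#   >>> search = GridSearchCV(SVC(), {'C': [1, 10]})
#   >>> search.fit(X_data, y_data)                  # doctest: +SKIP
#   >>> search.best_params_, search.best_score_     # doctest: +SKIP
#   >>> for cv_tuple in search.grid_scores_:        # doctest: +SKIP
#   ...     print(cv_tuple)   # mean, std and parameters of one setting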
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
    used to predict are optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, optional
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
Attributes
----------
`grid_scores_` : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
`best_estimator_` : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data.
`best_score_` : float
Score of best_estimator on the left out data.
`best_params_` : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
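# Illustrative sketch (not part of the original module): sampling a
# continuous distribution for C; the estimator and data names below are
# assumptions.
#
#   >>> from scipy.stats import expon
#   >>> rs = RandomizedSearchCV(SVC(), {'C': expon()}, n_iter=5,
#   ...                         random_state=0)
#   >>> rs.fit(X_data, y_data).best_params_         # doctest: +SKIP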
| [
"[email protected]"
] | |
b2e82b041ae60991fc2615856e24aaef9e02e41b | 72a58c62d62210e853ef09fdee65bf6ffb8972bd | /src/lib/telegram/bot.py | 1814abd2853af3c3711e9a156c5eb5f318c7699b | [
"MIT"
] | permissive | thonkify/thonkify | 93ade2489f20fb80c5e8e27fe67b9b231ada62bd | 2cb4493d796746cb46c8519a100ef3ef128a761a | refs/heads/master | 2023-09-01T00:03:10.398583 | 2018-03-16T09:18:24 | 2018-03-16T09:18:24 | 99,354,595 | 17 | 3 | MIT | 2023-09-05T02:27:42 | 2017-08-04T15:10:50 | Python | UTF-8 | Python | false | false | 84,929 | py | #!/usr/bin/env python
# pylint: disable=E0611,E0213,E1102,C0103,E1101,W0613,R0913,R0904
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram Bot."""
import functools
import logging
import warnings
from telegram import (User, Message, Update, Chat, ChatMember, UserProfilePhotos, File,
ReplyMarkup, TelegramObject, WebhookInfo, GameHighScore)
from telegram.error import InvalidToken, TelegramError
from telegram.utils.request import Request
logging.getLogger(__name__).addHandler(logging.NullHandler())
def info(func):
    # Decorator: fetch the bot's own account data (via get_me) the first
    # time one of the cached bot properties is accessed.
    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        if not self.bot:
            self.get_me()
        result = func(self, *args, **kwargs)
        return result
    return decorator
def log(func):
    # Decorator: log entry, result and exit of the wrapped API call at
    # DEBUG level.
    logger = logging.getLogger(func.__module__)
    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        logger.debug('Entering: %s', func.__name__)
        result = func(self, *args, **kwargs)
        logger.debug(result)
        logger.debug('Exiting: %s', func.__name__)
        return result
    return decorator
def message(func):
    # Decorator: the wrapped method only builds and returns ``(url, data)``;
    # the shared keyword handling and the actual request happen in
    # Bot._message_wrapper.
    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        url, data = func(self, *args, **kwargs)
        return self._message_wrapper(url, data, *args, **kwargs)
    return decorator
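# Illustrative sketch (not part of the original module): a hypothetical
# minimal API method written in the same style -- it only assembles the
# endpoint URL and payload, while @message performs the request:
#
#   @log
#   @message
#   def send_example(self, chat_id, payload, **kwargs):
#       url = '{0}/sendExample'.format(self.base_url)
#       return url, {'chat_id': chat_id, 'payload': payload}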
class Bot(TelegramObject):
"""This object represents a Telegram Bot.
Properties:
id (int): Unique identifier for this bot.
first_name (str): Bot's first name.
last_name (str): Bot's last name.
username (str): Bot's username.
name (str): Bot's @username.
Args:
token (str): Bot's unique authentication.
base_url (Optional[str]): Telegram Bot API service URL.
base_file_url (Optional[str]): Telegram Bot API file URL.
request (Optional[Request]): Pre initialized `Request` class.
"""
def __init__(self, token, base_url=None, base_file_url=None, request=None):
self.token = self._validate_token(token)
if base_url is None:
base_url = 'https://api.telegram.org/bot'
if base_file_url is None:
base_file_url = 'https://api.telegram.org/file/bot'
self.base_url = str(base_url) + str(self.token)
self.base_file_url = str(base_file_url) + str(self.token)
self.bot = None
self._request = request or Request()
self.logger = logging.getLogger(__name__)
@property
def request(self):
return self._request
@staticmethod
def _validate_token(token):
"""a very basic validation on token"""
if any(x.isspace() for x in token):
raise InvalidToken()
left, sep, _right = token.partition(':')
if (not sep) or (not left.isdigit()) or (len(left) < 3):
raise InvalidToken()
return token
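    # Illustrative sketch (not part of the original module): the check above
    # accepts tokens shaped like '<bot id>:<secret>' with a numeric id of at
    # least three digits and no whitespace anywhere.
    #
    #   >>> Bot._validate_token('123:ABC-DEF')
    #   '123:ABC-DEF'
    #   >>> Bot._validate_token('not a token')   # raises InvalidToken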
@property
@info
def id(self):
return self.bot.id
@property
@info
def first_name(self):
return self.bot.first_name
@property
@info
def last_name(self):
return self.bot.last_name
@property
@info
def username(self):
return self.bot.username
@property
def name(self):
return '@{0}'.format(self.username)
    def _message_wrapper(self, url, data, *args, **kwargs):
        # Merge keyword arguments shared by all send_* methods (reply id,
        # notification flag, reply markup) into the payload, then POST and
        # parse the response.
if kwargs.get('reply_to_message_id'):
data['reply_to_message_id'] = kwargs.get('reply_to_message_id')
if kwargs.get('disable_notification'):
data['disable_notification'] = kwargs.get('disable_notification')
if kwargs.get('reply_markup'):
reply_markup = kwargs.get('reply_markup')
if isinstance(reply_markup, ReplyMarkup):
data['reply_markup'] = reply_markup.to_json()
else:
data['reply_markup'] = reply_markup
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
if result is True:
return result
return Message.de_json(result, self)
@log
def get_me(self, timeout=None, **kwargs):
"""A simple method for testing your bot's auth token.
Args:
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
Returns:
:class:`telegram.User`: A :class:`telegram.User` instance representing that bot if the
credentials are valid, `None` otherwise.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getMe'.format(self.base_url)
result = self._request.get(url, timeout=timeout)
self.bot = User.de_json(result, self)
return self.bot
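    # Illustrative sketch (not part of the original module); the token below
    # is a placeholder assumption and the call performs a real HTTP request:
    #
    #   >>> bot = Bot('123:ABC-DEF')
    #   >>> me = bot.get_me()              # doctest: +SKIP
    #   >>> bot.username == me.username    # doctest: +SKIP
    #   True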
@log
@message
def send_message(self,
chat_id,
text,
parse_mode=None,
disable_web_page_preview=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to send text messages.
Args:
chat_id (int|str): Unique identifier for the target chat or
username of the target channel (in the format
@channelusername).
text (str): Text of the message to be sent. The current maximum
length is 4096 UTF-8 characters.
parse_mode (Optional[str]): Send Markdown or HTML, if you want
Telegram apps to show bold, italic, fixed-width text or inline
URLs in your bot's message.
disable_web_page_preview (Optional[bool]): Disables link previews
for links in this message.
disable_notification (Optional[bool]): Sends the message silently.
iOS users will not receive a notification, Android users will
receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply,
ID of the original message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional
interface options. A JSON-serialized object for an inline
keyboard, custom reply keyboard, instructions to remove reply
keyboard or to force a reply from the user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendMessage'.format(self.base_url)
data = {'chat_id': chat_id, 'text': text}
if parse_mode:
data['parse_mode'] = parse_mode
if disable_web_page_preview:
data['disable_web_page_preview'] = disable_web_page_preview
return url, data
@log
def delete_message(self, chat_id, message_id, timeout=None, **kwargs):
"""Use this method to delete a message. A message can only be deleted if it was sent less
than 48 hours ago. Any such recently sent outgoing message may be deleted. Additionally,
if the bot is an administrator in a group chat, it can delete any message. If the bot is
an administrator in a supergroup, it can delete messages from any other user and service
messages about people joining or leaving the group (other types of service messages may
only be removed by the group creator). In channels, bots can only remove their own
messages.
Args:
chat_id (int|str): Unique identifier for the target chat or
username of the target channel (in the format
@channelusername).
message_id (int): Unique message identifier.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
bool: On success, `True` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/deleteMessage'.format(self.base_url)
data = {'chat_id': chat_id, 'message_id': message_id}
result = self._request.post(url, data, timeout=timeout)
return result
@log
@message
def forward_message(self,
chat_id,
from_chat_id,
message_id,
disable_notification=False,
timeout=None,
**kwargs):
"""Use this method to forward messages of any kind.
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
from_chat_id (int|str): Unique identifier for the chat where the original message was
sent - Chat id.
message_id (int): Unique message identifier.
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message forwarded.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/forwardMessage'.format(self.base_url)
data = {}
if chat_id:
data['chat_id'] = chat_id
if from_chat_id:
data['from_chat_id'] = from_chat_id
if message_id:
data['message_id'] = message_id
return url, data
@log
@message
def send_photo(self,
chat_id,
photo,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20.,
**kwargs):
"""Use this method to send photos.
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
photo: Photo to send. You can either pass a file_id as String to resend a photo that is
already on the Telegram servers, or upload a new photo using multipart/form-data.
caption (Optional[str]): Photo caption (may also be used when resending photos by
file_id).
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): Send file timeout (default: 20 seconds).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendPhoto'.format(self.base_url)
data = {'chat_id': chat_id, 'photo': photo}
if caption:
data['caption'] = caption
return url, data
@log
@message
def send_audio(self,
chat_id,
audio,
duration=None,
performer=None,
title=None,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20.,
**kwargs):
"""Use this method to send audio files, if you want Telegram clients to
display them in the music player. Your audio must be in an .mp3 format.
On success, the sent Message is returned. Bots can currently send audio
files of up to 50 MB in size, this limit may be changed in the future.
For backward compatibility, when both fields title and description are
empty and mime-type of the sent file is not "audio/mpeg", file is sent
as playable voice message. In this case, your audio must be in an .ogg
file encoded with OPUS. This will be removed in the future. You need to
use sendVoice method instead.
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
audio: Audio file to send. You can either pass a file_id as String to resend an audio
that is already on the Telegram servers, or upload a new audio file using
multipart/form-data.
duration (Optional[int]): Duration of sent audio in seconds.
performer (Optional[str]): Performer of sent audio.
title (Optional[str]): Title of sent audio.
caption (Optional[str]): Audio caption
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): Send file timeout (default: 20 seconds).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendAudio'.format(self.base_url)
data = {'chat_id': chat_id, 'audio': audio}
if duration:
data['duration'] = duration
if performer:
data['performer'] = performer
if title:
data['title'] = title
if caption:
data['caption'] = caption
return url, data
@log
@message
def send_document(self,
chat_id,
document,
filename=None,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20.,
**kwargs):
"""Use this method to send general files.
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
document: File to send. You can either pass a file_id as String to resend a file that
is already on the Telegram servers, or upload a new file using multipart/form-data.
            filename (Optional[str]): File name that shows in the telegram message (it is
                useful when you send a file generated by the temp module, for example).
caption (Optional[str]): Document caption (may also be used when resending documents by
file_id), 0-200 characters.
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): Send file timeout (default: 20 seconds).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendDocument'.format(self.base_url)
data = {'chat_id': chat_id, 'document': document}
if filename:
data['filename'] = filename
if caption:
data['caption'] = caption
return url, data
@log
@message
def send_sticker(self,
chat_id,
sticker,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to send .webp stickers.
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
sticker: Sticker to send. You can either pass a file_id as String to resend a sticker
that is already on the Telegram servers, or upload a new sticker using
multipart/form-data.
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendSticker'.format(self.base_url)
data = {'chat_id': chat_id, 'sticker': sticker}
return url, data
@log
@message
def send_video(self,
chat_id,
video,
duration=None,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20.,
**kwargs):
"""Use this method to send video files, Telegram clients support mp4
videos (other formats may be sent as telegram.Document).
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
video: Video to send. You can either pass a file_id as String to resend a video that is
already on the Telegram servers, or upload a new video file using
multipart/form-data.
duration (Optional[int]): Duration of sent video in seconds.
caption (Optional[str]): Video caption (may also be used when resending videos by
file_id).
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): Send file timeout (default: 20 seconds).
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendVideo'.format(self.base_url)
data = {'chat_id': chat_id, 'video': video}
if duration:
data['duration'] = duration
if caption:
data['caption'] = caption
return url, data
@log
@message
def send_voice(self,
chat_id,
voice,
duration=None,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20.,
**kwargs):
"""Use this method to send audio files, if you want Telegram clients to display the file as
a playable voice message. For this to work, your audio must be in an .ogg file encoded with
OPUS (other formats may be sent as Audio or Document). On success, the sent Message is
returned. Bots can currently send audio files of up to 50 MB in size, this limit may be
changed in the future.
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
voice: Audio file to send. You can either pass a file_id as String to resend an audio
that is already on the Telegram servers, or upload a new audio file using
multipart/form-data.
duration (Optional[int]): Duration of sent audio in seconds.
caption (Optional[str]): Voice caption
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): Send file timeout (default: 20 seconds).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendVoice'.format(self.base_url)
data = {'chat_id': chat_id, 'voice': voice}
if duration:
data['duration'] = duration
if caption:
data['caption'] = caption
return url, data
@log
@message
def send_video_note(self,
chat_id,
video_note,
duration=None,
length=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20.,
**kwargs):
"""As of v.4.0, Telegram clients support rounded square mp4 videos of up to 1 minute
        long. Use this method to send video messages.
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
video_note (InputFile|str): Video note to send. Pass a file_id as String to send a
video note that exists on the Telegram servers (recommended) or upload a new video.
Sending video notes by a URL is currently unsupported
duration (Optional[int]): Duration of sent audio in seconds.
length (Optional[int]): Video width and height
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): Send file timeout (default: 20 seconds).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendVideoNote'.format(self.base_url)
data = {'chat_id': chat_id, 'video_note': video_note}
if duration is not None:
data['duration'] = duration
if length is not None:
data['length'] = length
return url, data
@log
@message
def send_location(self,
chat_id,
latitude,
longitude,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to send point on the map.
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
latitude (float): Latitude of location.
longitude (float): Longitude of location.
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendLocation'.format(self.base_url)
data = {'chat_id': chat_id, 'latitude': latitude, 'longitude': longitude}
return url, data
@log
@message
def send_venue(self,
chat_id,
latitude,
longitude,
title,
address,
foursquare_id=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""
Use this method to send information about a venue.
Args:
chat_id (int|str): Unique identifier for the target chat or username of the target
channel (in the format @channelusername).
latitude (float): Latitude of the venue.
longitude (float): Longitude of the venue.
title (str): Name of the venue.
address (str): Address of the venue.
foursquare_id (Optional[str]): Foursquare identifier of the venue.
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendVenue'.format(self.base_url)
data = {
'chat_id': chat_id,
'latitude': latitude,
'longitude': longitude,
'address': address,
'title': title
}
if foursquare_id:
data['foursquare_id'] = foursquare_id
return url, data
@log
@message
def send_contact(self,
chat_id,
phone_number,
first_name,
last_name=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""
Use this method to send phone contacts.
Args:
chat_id (int|str): Unique identifier for the target chat or username of the target
channel (in the format @channelusername).
phone_number (str): Contact's phone number.
first_name (str): Contact's first name.
last_name (Optional[str]): Contact's last name.
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendContact'.format(self.base_url)
data = {'chat_id': chat_id, 'phone_number': phone_number, 'first_name': first_name}
if last_name:
data['last_name'] = last_name
return url, data
@log
@message
def send_game(self,
chat_id,
game_short_name,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to send a game.
Args:
chat_id (int|str): Unique identifier for the target chat or username of the target
channel (in the format @channelusername).
game_short_name (str): Short name of the game, serves as the unique identifier for the
game.
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply,
ID of the original message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options.
A JSON-serialized object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendGame'.format(self.base_url)
data = {'chat_id': chat_id, 'game_short_name': game_short_name}
return url, data
@log
def send_chat_action(self, chat_id, action, timeout=None, **kwargs):
"""Use this method when you need to tell the user that something is happening on the bot's
side. The status is set for 5 seconds or less (when a message arrives from your bot,
Telegram clients clear its typing status).
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
action(:class:`telegram.ChatAction`|str): Type of action to broadcast. Choose one,
depending on what the user is about to receive:
- ChatAction.TYPING for text messages,
- ChatAction.UPLOAD_PHOTO for photos,
- ChatAction.UPLOAD_VIDEO for videos,
- ChatAction.UPLOAD_AUDIO for audio files,
- ChatAction.UPLOAD_DOCUMENT for general files,
- ChatAction.FIND_LOCATION for location data.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
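        Example:
            A minimal usage sketch; assumes ``bot`` is an initialized :class:`telegram.Bot`
            and the chat id is a placeholder::

                from telegram import ChatAction

                bot.send_chat_action(chat_id=123456789, action=ChatAction.TYPING)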
"""
url = '{0}/sendChatAction'.format(self.base_url)
data = {'chat_id': chat_id, 'action': action}
result = self._request.post(url, data, timeout=timeout)
return result
@log
def answer_inline_query(self,
inline_query_id,
results,
cache_time=300,
is_personal=None,
next_offset=None,
switch_pm_text=None,
switch_pm_parameter=None,
timeout=None,
**kwargs):
"""Use this method to send answers to an inline query. No more than 50 results per query
are allowed.
Args:
inline_query_id (str): Unique identifier for the answered query.
results (list[:class:`telegram.InlineQueryResult`]): A list of results for the inline
query.
            cache_time (Optional[int]): The maximum amount of time in seconds that the result of
                the inline query may be cached on the server.
is_personal (Optional[bool]): Pass `True`, if results may be cached on the server side
only for the user that sent the query. By default, results may be returned to any
user who sends the same query.
next_offset (Optional[str]): Pass the offset that a client should send in the next
query with the same text to receive more results. Pass an empty string if there are
no more results or if you don't support pagination. Offset length can't exceed 64
bytes.
switch_pm_text (Optional[str]): If passed, clients will display a button with specified
text that switches the user to a private chat with the bot and sends the bot a
start message with the parameter switch_pm_parameter.
switch_pm_parameter (Optional[str]): Parameter for the start message sent to the bot
when user presses the switch button.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
bool: On success, `True` is returned.
Raises:
:class:`telegram.TelegramError`
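        Example:
            A minimal sketch of an echo-style inline handler; assumes ``update`` carries an
            inline query and ``bot`` is an initialized :class:`telegram.Bot`::

                from telegram import InlineQueryResultArticle, InputTextMessageContent

                results = [InlineQueryResultArticle(
                    id='1', title='Echo',
                    input_message_content=InputTextMessageContent(update.inline_query.query))]
                bot.answer_inline_query(update.inline_query.id, results)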
"""
url = '{0}/answerInlineQuery'.format(self.base_url)
results = [res.to_dict() for res in results]
data = {'inline_query_id': inline_query_id, 'results': results}
        if cache_time is not None:
data['cache_time'] = cache_time
if is_personal:
data['is_personal'] = is_personal
if next_offset is not None:
data['next_offset'] = next_offset
if switch_pm_text:
data['switch_pm_text'] = switch_pm_text
if switch_pm_parameter:
data['switch_pm_parameter'] = switch_pm_parameter
result = self._request.post(url, data, timeout=timeout)
return result
@log
def get_user_profile_photos(self, user_id, offset=None, limit=100, timeout=None, **kwargs):
"""Use this method to get a list of profile pictures for a user.
Args:
user_id (int): Unique identifier of the target user.
offset (Optional[int]): Sequential number of the first photo to be returned. By
default, all photos are returned.
limit (Optional[int]): Limits the number of photos to be retrieved. Values between
1-100 are accepted. Defaults to 100.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
            :class:`telegram.UserProfilePhotos`: On success, a :class:`telegram.UserProfilePhotos`
                object containing the requested profile photos is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getUserProfilePhotos'.format(self.base_url)
data = {'user_id': user_id}
if offset:
data['offset'] = offset
if limit:
data['limit'] = limit
result = self._request.post(url, data, timeout=timeout)
return UserProfilePhotos.de_json(result, self)
@log
def get_file(self, file_id, timeout=None, **kwargs):
"""Use this method to get basic info about a file and prepare it for downloading. For the
moment, bots can download files of up to 20MB in size.
Args:
file_id (str): File identifier to get info about.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.File`: On success, a :class:`telegram.File` object is returned.
Raises:
:class:`telegram.TelegramError`
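        Example:
            A minimal download sketch; assumes ``bot`` is an initialized :class:`telegram.Bot`,
            ``file_id`` refers to a file the bot has received, and the target path is a
            placeholder::

                new_file = bot.get_file(file_id)
                new_file.download('photo.jpg')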
"""
url = '{0}/getFile'.format(self.base_url)
data = {'file_id': file_id}
result = self._request.post(url, data, timeout=timeout)
if result.get('file_path'):
result['file_path'] = '%s/%s' % (self.base_file_url, result['file_path'])
return File.de_json(result, self)
@log
def kick_chat_member(self, chat_id, user_id, timeout=None, **kwargs):
"""Use this method to kick a user from a group or a supergroup.
In the case of supergroups, the user will not be able to return to the group on their own
using invite links, etc., unless unbanned first. The bot must be an administrator in the
group for this to work.
Args:
chat_id (int|str): Unique identifier for the target group or username of the target
supergroup (in the format @supergroupusername).
user_id (int|str): Unique identifier of the target user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
bool: On success, `True` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/kickChatMember'.format(self.base_url)
data = {'chat_id': chat_id, 'user_id': user_id}
result = self._request.post(url, data, timeout=timeout)
return result
@log
def unban_chat_member(self, chat_id, user_id, timeout=None, **kwargs):
"""Use this method to unban a previously kicked user in a supergroup.
The user will not return to the group automatically, but will be able to join via link,
etc. The bot must be an administrator in the group for this to work.
Args:
chat_id (int|str): Unique identifier for the target group or username of the target
supergroup (in the format @supergroupusername).
user_id (int|str): Unique identifier of the target user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
bool: On success, `True` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/unbanChatMember'.format(self.base_url)
data = {'chat_id': chat_id, 'user_id': user_id}
result = self._request.post(url, data, timeout=timeout)
return result
@log
def answer_callback_query(self,
callback_query_id,
text=None,
show_alert=False,
url=None,
cache_time=None,
timeout=None,
**kwargs):
"""Use this method to send answers to callback queries sent from inline keyboards. The
answer will be displayed to the user as a notification at the top of the chat screen or as
an alert.
Args:
callback_query_id (str): Unique identifier for the query to be answered.
text (Optional[str]): Text of the notification. If not specified, nothing will be shown
to the user.
show_alert (Optional[bool]): If `True`, an alert will be shown by the client instead of
a notification at the top of the chat screen. Defaults to `False`.
url (Optional[str]): URL that will be opened by the user's client.
cache_time (Optional[int]): The maximum amount of time in seconds that the result of
the callback query may be cached client-side. Telegram apps will support caching
starting in version 3.14. Defaults to 0.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
bool: On success, `True` is returned.
Raises:
:class:`telegram.TelegramError`
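        Example:
            A minimal usage sketch; assumes ``update`` carries a callback query from an inline
            keyboard and ``bot`` is an initialized :class:`telegram.Bot`::

                bot.answer_callback_query(update.callback_query.id,
                                          text='Button pressed!',
                                          show_alert=False)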
"""
url_ = '{0}/answerCallbackQuery'.format(self.base_url)
data = {'callback_query_id': callback_query_id}
if text:
data['text'] = text
if show_alert:
data['show_alert'] = show_alert
if url:
data['url'] = url
if cache_time is not None:
data['cache_time'] = cache_time
result = self._request.post(url_, data, timeout=timeout)
return result
@log
@message
def edit_message_text(self,
text,
chat_id=None,
message_id=None,
inline_message_id=None,
parse_mode=None,
disable_web_page_preview=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to edit text messages sent by the bot or via the bot (for inline bots).
Args:
text (str): New text of the message.
chat_id (Optional[int|str]): Required if inline_message_id is not specified. Unique
identifier for the target chat or username of the target channel (in the format
@channelusername).
message_id (Optional[int]): Required if inline_message_id is not specified. Unique
identifier of the sent message.
inline_message_id (Optional[str]): Required if chat_id and message_id are not
specified. Identifier of the inline message.
parse_mode (:class:`telegram.ParseMode`|str): Send Markdown or HTML, if you want
Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's
message.
disable_web_page_preview (bool): Disables link previews for links in this message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, if edited message is sent by the bot, the edited
message is returned, otherwise `True` is returned.
Raises:
:class:`telegram.TelegramError`
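        Example:
            A minimal usage sketch; the chat and message ids are placeholders and ``bot`` is
            assumed to be an initialized :class:`telegram.Bot`::

                bot.edit_message_text(chat_id=123456789,
                                      message_id=42,
                                      text='Updated text')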
"""
url = '{0}/editMessageText'.format(self.base_url)
data = {'text': text}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
if parse_mode:
data['parse_mode'] = parse_mode
if disable_web_page_preview:
data['disable_web_page_preview'] = disable_web_page_preview
return url, data
@log
@message
def edit_message_caption(self,
chat_id=None,
message_id=None,
inline_message_id=None,
caption=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to edit captions of messages sent by the bot or via the bot (for inline
bots).
Args:
chat_id (Optional[int|str]): Required if inline_message_id is not specified. Unique
identifier for the target chat or username of the target channel (in the format
@channelusername).
message_id (Optional[int]): Required if inline_message_id is not specified. Unique
identifier of the sent message.
inline_message_id (Optional[str]): Required if chat_id and message_id are not
specified. Identifier of the inline message.
caption (Optional[str]): New caption of the message.
reply_markup (Optional[:class:`telegram.InlineKeyboardMarkup`]): A JSON-serialized
object for an inline keyboard.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, if edited message is sent by the bot, the edited
message is returned, otherwise `True` is returned.
Raises:
:class:`telegram.TelegramError`
"""
if inline_message_id is None and (chat_id is None or message_id is None):
raise TelegramError(
'editMessageCaption: Both chat_id and message_id are required when '
'inline_message_id is not specified')
url = '{0}/editMessageCaption'.format(self.base_url)
data = {}
if caption:
data['caption'] = caption
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
return url, data
@log
@message
def edit_message_reply_markup(self,
chat_id=None,
message_id=None,
inline_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to edit only the reply markup of messages sent by the bot or via the bot
(for inline bots).
Args:
chat_id (Optional[int|str]): Required if inline_message_id is not specified. Unique
identifier for the target chat or username of the target channel (in the format
@channelusername).
message_id (Optional[int]): Required if inline_message_id is not specified. Unique
identifier of the sent message.
inline_message_id (Optional[str]): Required if chat_id and message_id are not
specified. Identifier of the inline message.
reply_markup (Optional[:class:`telegram.InlineKeyboardMarkup`]): A JSON-serialized
object for an inline keyboard.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, if edited message is sent by
the bot, the edited message is returned, otherwise `True` is
returned.
Raises:
:class:`telegram.TelegramError`
"""
if inline_message_id is None and (chat_id is None or message_id is None):
raise TelegramError(
                'editMessageReplyMarkup: Both chat_id and message_id are required when '
'inline_message_id is not specified')
url = '{0}/editMessageReplyMarkup'.format(self.base_url)
data = {}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
return url, data
@log
def get_updates(self,
offset=None,
limit=100,
timeout=0,
network_delay=None,
read_latency=2.,
allowed_updates=None,
**kwargs):
"""Use this method to receive incoming updates using long polling.
Args:
offset (Optional[int]): Identifier of the first update to be returned. Must be greater
by one than the highest among the identifiers of previously received updates. By
default, updates starting with the earliest unconfirmed update are returned. An
update is considered confirmed as soon as getUpdates is called with an offset
higher than its update_id.
limit (Optional[int]): Limits the number of updates to be retrieved. Values between
1-100 are accepted. Defaults to 100.
allowed_updates (Optional[list[str]]): List the types of updates you want your bot to
receive. For example, specify
``["message", "edited_channel_post", "callback_query"]`` to only receive updates of
these types. See ``telegram.Update`` for a complete list of available update types.
Specify an empty list to receive all updates regardless of type (default). If not
specified, the previous setting will be used.
Please note that this parameter doesn't affect updates created before the call to
the setWebhook, so unwanted updates may be received for a short period of time.
timeout (Optional[int]): Timeout in seconds for long polling. Defaults to 0, i.e. usual
short polling. Be careful not to set this timeout too high, as the connection might
be dropped and there's no way of knowing it immediately (so most likely the failure
will be detected after the timeout had passed).
network_delay: Deprecated. Will be honoured as `read_latency` for a while but will be
removed in the future.
read_latency (Optional[float|int]): Grace time in seconds for receiving the reply from
server. Will be added to the `timeout` value and used as the read timeout from
server (Default: 2).
**kwargs (dict): Arbitrary keyword arguments.
        Notes:
            The main problem with long polling is that the connection can be dropped without us
            noticing, so an update may arrive while we are not listening. To mitigate this, pair
            a polling ``timeout`` that is long, but not too long, with a ``read_latency`` that is
            short, but not too short. Long polling improves performance, but if the timeout is
            too long and the connection drops, we won't detect the failure until both the polling
            timeout and the read latency have passed. If you experience connection timeouts, tune
            these settings.
Returns:
list[:class:`telegram.Update`]
Raises:
:class:`telegram.TelegramError`
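        Example:
            A minimal long-polling sketch (no error handling); assumes ``bot`` is an
            initialized :class:`telegram.Bot`::

                offset = None
                while True:
                    for update in bot.get_updates(offset=offset, timeout=10):
                        offset = update.update_id + 1
                        if update.message:
                            print(update.message.text)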
"""
url = '{0}/getUpdates'.format(self.base_url)
if network_delay is not None:
warnings.warn('network_delay is deprecated, use read_latency instead')
read_latency = network_delay
data = {'timeout': timeout}
if offset:
data['offset'] = offset
if limit:
data['limit'] = limit
if allowed_updates is not None:
data['allowed_updates'] = allowed_updates
# Ideally we'd use an aggressive read timeout for the polling. However,
# * Short polling should return within 2 seconds.
# * Long polling poses a different problem: the connection might have been dropped while
# waiting for the server to return and there's no way of knowing the connection had been
# dropped in real time.
result = self._request.post(url, data, timeout=float(read_latency) + float(timeout))
if result:
self.logger.debug('Getting updates: %s', [u['update_id'] for u in result])
else:
self.logger.debug('No new updates found.')
return [Update.de_json(u, self) for u in result]
@log
def set_webhook(self,
url=None,
certificate=None,
timeout=None,
max_connections=40,
allowed_updates=None,
**kwargs):
"""Use this method to specify a url and receive incoming updates via an outgoing webhook.
Whenever there is an update for the bot, we will send an HTTPS POST request to the
specified url, containing a JSON-serialized Update. In case of an unsuccessful request, we
will give up after a reasonable amount of attempts.
Args:
url (str): HTTPS url to send updates to. Use an empty string to remove webhook
integration.
certificate (file): Upload your public key certificate so that the root certificate in
use can be checked.
max_connections (Optional[int]): Maximum allowed number of simultaneous HTTPS
connections to the webhook for update delivery, 1-100. Defaults to 40. Use lower
values to limit the load on your bot's server, and higher values to increase your
bot's throughput.
allowed_updates (Optional[list[str]]): List the types of updates you want your bot to
receive. For example, specify
``["message", "edited_channel_post", "callback_query"]`` to only receive updates of
these types. See ``telegram.Update`` for a complete list of available update types.
Specify an empty list to receive all updates regardless of type (default). If not
specified, the previous setting will be used.
Please note that this parameter doesn't affect updates created before the call to
the setWebhook, so unwanted updates may be received for a short period of time.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
bool: On success, `True` is returned.
Raises:
:class:`telegram.TelegramError`
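        Example:
            A minimal usage sketch; the url and certificate path are placeholders and ``bot``
            is assumed to be an initialized :class:`telegram.Bot`::

                with open('public.pem', 'rb') as cert:
                    bot.set_webhook(url='https://example.com/{0}'.format(bot.token),
                                    certificate=cert)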
"""
url_ = '{0}/setWebhook'.format(self.base_url)
# Backwards-compatibility: 'url' used to be named 'webhook_url'
if 'webhook_url' in kwargs:
warnings.warn("The 'webhook_url' parameter has been renamed to 'url' in accordance "
"with the API")
if url is not None:
raise ValueError("The parameters 'url' and 'webhook_url' are mutually exclusive")
url = kwargs['webhook_url']
del kwargs['webhook_url']
data = {}
if url is not None:
data['url'] = url
if certificate:
data['certificate'] = certificate
if max_connections is not None:
data['max_connections'] = max_connections
if allowed_updates is not None:
data['allowed_updates'] = allowed_updates
result = self._request.post(url_, data, timeout=timeout)
return result
@log
def delete_webhook(self, timeout=None, **kwargs):
"""Use this method to remove webhook integration if you decide to switch back to
getUpdates. Returns True on success. Requires no parameters.
Args:
timeout (Optional[float]): If this value is specified, use it as the definitive timeout
(in seconds) for urlopen() operations.
**kwargs (dict): Arbitrary keyword arguments.
Returns:
bool: On success, `True` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/deleteWebhook'.format(self.base_url)
data = {}
result = self._request.post(url, data, timeout=timeout)
return result
@log
def leave_chat(self, chat_id, timeout=None, **kwargs):
"""Use this method for your bot to leave a group, supergroup or channel.
Args:
chat_id (int|str): Unique identifier for the target chat or username of the target
channel (in the format @channelusername).
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
bool: On success, `True` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/leaveChat'.format(self.base_url)
data = {'chat_id': chat_id}
result = self._request.post(url, data, timeout=timeout)
return result
@log
def get_chat(self, chat_id, timeout=None, **kwargs):
"""Use this method to get up to date information about the chat (current name of the user
for one-on-one conversations, current username of a user, group or channel, etc.).
Args:
chat_id (int|str): Unique identifier for the target chat or username of the target
channel (in the format @channelusername).
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Chat`: On success, :class:`telegram.Chat` is
returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getChat'.format(self.base_url)
data = {'chat_id': chat_id}
result = self._request.post(url, data, timeout=timeout)
return Chat.de_json(result, self)
@log
def get_chat_administrators(self, chat_id, timeout=None, **kwargs):
"""Use this method to get a list of administrators in a chat. On success, returns an Array
of ChatMember objects that contains information about all chat administrators except other
bots. If the chat is a group or a supergroup and no administrators were appointed, only the
creator will be returned.
Args:
chat_id (int|str): Unique identifier for the target chat or username of the target
channel (in the format @channelusername).
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
list[:class:`telegram.ChatMember`]: A list of chat member objects.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getChatAdministrators'.format(self.base_url)
data = {'chat_id': chat_id}
result = self._request.post(url, data, timeout=timeout)
return [ChatMember.de_json(x, self) for x in result]
@log
def get_chat_members_count(self, chat_id, timeout=None, **kwargs):
"""Use this method to get the number of members in a chat.
Args:
chat_id (int|str): Unique identifier for the target chat or username of the target
channel (in the format @channelusername).
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
int: On success, an `int` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getChatMembersCount'.format(self.base_url)
data = {'chat_id': chat_id}
result = self._request.post(url, data, timeout=timeout)
return result
@log
def get_chat_member(self, chat_id, user_id, timeout=None, **kwargs):
"""Use this method to get information about a member of a chat.
Args:
chat_id (int|str): Unique identifier for the target chat or username of the target
channel (in the format @channelusername).
user_id (int): Unique identifier of the target user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.ChatMember`: On success, chat member object is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getChatMember'.format(self.base_url)
data = {'chat_id': chat_id, 'user_id': user_id}
result = self._request.post(url, data, timeout=timeout)
return ChatMember.de_json(result, self)
def get_webhook_info(self, timeout=None, **kwargs):
"""Use this method to get current webhook status.
If the bot is using getUpdates, will return an object with the url field empty.
Args:
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
            :class:`telegram.WebhookInfo`
"""
url = '{0}/getWebhookInfo'.format(self.base_url)
data = {}
result = self._request.post(url, data, timeout=timeout)
return WebhookInfo.de_json(result, self)
@log
@message
def set_game_score(self,
user_id,
score,
chat_id=None,
message_id=None,
inline_message_id=None,
edit_message=None,
force=None,
disable_edit_message=None,
timeout=None,
**kwargs):
"""Use this method to set the score of the specified user in a game.
Args:
user_id (int): User identifier.
score (int): New score, must be non-negative.
chat_id (Optional[int|str]): Required if `inline_message_id` is not specified. Unique
identifier for the target chat (or username of the target channel in the format
`@channelusername`)
message_id (Optional[int]): Required if inline_message_id is not specified. Identifier
of the sent message.
inline_message_id (Optional[str]): Required if chat_id and message_id are not
specified. Identifier of the inline message.
force (Optional[bool]): Pass True, if the high score is allowed to decrease. This can
be useful when fixing mistakes or banning cheaters.
disable_edit_message (Optional[bool]): Pass True, if the game message should not be
automatically edited to include the current scoreboard.
            edit_message (Optional[bool]): Deprecated. Has the opposite meaning of
                `disable_edit_message`.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message` or True: The edited message, or if the
message wasn't sent by the bot, True.
"""
url = '{0}/setGameScore'.format(self.base_url)
data = {'user_id': user_id, 'score': score}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
if force is not None:
data['force'] = force
if disable_edit_message is not None:
data['disable_edit_message'] = disable_edit_message
if edit_message is not None:
warnings.warn('edit_message is deprecated, use disable_edit_message instead')
if disable_edit_message is None:
data['edit_message'] = edit_message
else:
warnings.warn('edit_message is ignored when disable_edit_message is used')
return url, data
@log
def get_game_high_scores(self,
user_id,
chat_id=None,
message_id=None,
inline_message_id=None,
timeout=None,
**kwargs):
"""Use this method to get data for high score tables.
Args:
user_id (int): User identifier.
chat_id (Optional[int|str]): Required if `inline_message_id` is not specified. Unique
identifier for the target chat (or username of the target channel in the format
`@channelusername`)
message_id (Optional[int]): Required if inline_message_id is not specified. Identifier
of the sent message.
inline_message_id (Optional[str]): Required if chat_id and message_id are not
specified. Identifier of the inline message.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
            list[:class:`telegram.GameHighScore`]: Scores of the specified user and several of
                their neighbors in a game.
"""
        url = '{0}/getGameHighScores'.format(self.base_url)
data = {'user_id': user_id}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
result = self._request.post(url, data, timeout=timeout)
return [GameHighScore.de_json(hs, self) for hs in result]
@log
@message
def send_invoice(self,
chat_id,
title,
description,
payload,
provider_token,
start_parameter,
currency,
prices,
photo_url=None,
photo_size=None,
photo_width=None,
photo_height=None,
need_name=None,
need_phone_number=None,
need_email=None,
need_shipping_address=None,
is_flexible=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""
Use this method to send invoices.
Args:
chat_id (int|str): Unique identifier for the target private chat
title (str): Product name
description (str): Product description
payload (str): Bot-defined invoice payload, 1-128 bytes. This will not be displayed
to the user, use for your internal processes.
provider_token (str): Payments provider token, obtained via Botfather
start_parameter (str): Unique deep-linking parameter that can be used to generate
this invoice when used as a start parameter
currency (str): Three-letter ISO 4217 currency code
prices (List[:class:`telegram.LabeledPrice`]): Price breakdown, a list of components
(e.g. product price, tax, discount, delivery cost, delivery tax, bonus, etc.)
photo_url (Optional[str]): URL of the product photo for the invoice. Can be a photo of
the goods or a marketing image for a service. People like it better when they
see what they are paying for.
            photo_size (Optional[int]): Photo size
photo_width (Optional[int]): Photo width
photo_height (Optional[int]): Photo height
need_name (Optional[bool]): Pass True, if you require the user's full name to complete
the order
need_phone_number (Optional[bool]): Pass True, if you require the user's phone number
to complete the order
need_email (Optional[bool]): Pass True, if you require the user's email to
complete the order
need_shipping_address (Optional[bool]): Pass True, if you require the user's shipping
address to complete the order
is_flexible (Optional[bool]): Pass True, if the final price depends on the shipping
method
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options.
                An inline keyboard. If empty, one 'Pay total price' button will be shown. If not
empty, the first button must be a Pay button.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
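        Example:
            A minimal usage sketch; the chat id, payload, provider token and prices are
            placeholders and ``bot`` is assumed to be an initialized :class:`telegram.Bot`::

                from telegram import LabeledPrice

                prices = [LabeledPrice('T-shirt', 2500)]  # amount in the smallest currency unit
                bot.send_invoice(chat_id=123456789, title='T-shirt',
                                 description='A very nice shirt', payload='internal-id-1',
                                 provider_token='PROVIDER_TOKEN', start_parameter='shirt',
                                 currency='USD', prices=prices)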
"""
url = '{0}/sendInvoice'.format(self.base_url)
data = {
'chat_id': chat_id,
'title': title,
'description': description,
'payload': payload,
'provider_token': provider_token,
'start_parameter': start_parameter,
'currency': currency,
'prices': [p.to_dict() for p in prices]
}
if photo_url is not None:
data['photo_url'] = photo_url
if photo_size is not None:
data['photo_size'] = photo_size
if photo_width is not None:
data['photo_width'] = photo_width
if photo_height is not None:
data['photo_height'] = photo_height
if need_name is not None:
data['need_name'] = need_name
if need_phone_number is not None:
data['need_phone_number'] = need_phone_number
if need_email is not None:
data['need_email'] = need_email
if need_shipping_address is not None:
data['need_shipping_address'] = need_shipping_address
if is_flexible is not None:
data['is_flexible'] = is_flexible
return url, data
@log
def answer_shipping_query(self,
shipping_query_id,
ok,
shipping_options=None,
error_message=None,
timeout=None,
**kwargs):
"""
If you sent an invoice requesting a shipping address and the parameter is_flexible was
specified, the Bot API will send an Update with a shipping_query field to the bot. Use
this method to reply to shipping queries.
Args:
shipping_query_id (str): Unique identifier for the query to be answered
ok (bool): Specify True if delivery to the specified address is possible and False if
there are any problems (for example, if delivery to the specified address
is not possible)
shipping_options (Optional[List[:class:`telegram.ShippingOption`]]): Required if ok is
True. A list of available shipping options.
error_message (Optional[str]): Required if ok is False. Error message in human readable
form that explains why it is impossible to complete the order (e.g. "Sorry,
                delivery to your desired address is unavailable"). Telegram will display this
message to the user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
bool: On success, `True` is returned.
Raises:
:class:`telegram.TelegramError`
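        Example:
            A minimal usage sketch; assumes ``update`` carries a shipping query, the option
            values are placeholders, and ``bot`` is an initialized :class:`telegram.Bot`::

                from telegram import LabeledPrice, ShippingOption

                options = [ShippingOption('std', 'Standard shipping',
                                          [LabeledPrice('Shipping', 500)])]
                bot.answer_shipping_query(update.shipping_query.id, ok=True,
                                          shipping_options=options)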
"""
ok = bool(ok)
if ok and (shipping_options is None or error_message is not None):
raise TelegramError(
'answerShippingQuery: If ok is True, shipping_options '
'should not be empty and there should not be error_message')
if not ok and (shipping_options is not None or error_message is None):
raise TelegramError(
'answerShippingQuery: If ok is False, error_message '
'should not be empty and there should not be shipping_options')
url_ = '{0}/answerShippingQuery'.format(self.base_url)
data = {'shipping_query_id': shipping_query_id, 'ok': ok}
if ok:
data['shipping_options'] = [option.to_dict() for option in shipping_options]
if error_message is not None:
data['error_message'] = error_message
result = self._request.post(url_, data, timeout=timeout)
return result
@log
def answer_pre_checkout_query(self, pre_checkout_query_id, ok,
error_message=None, timeout=None, **kwargs):
"""
        Once the user has confirmed their payment and shipping details, the Bot API sends a final
        confirmation in the form of an Update with the field pre_checkout_query. Use this method
        to reply to such pre-checkout queries.
Args:
pre_checkout_query_id (str): Unique identifier for the query to be answered
ok (bool): Specify True if everything is alright (goods are available, etc.) and the
bot is ready to proceed with the order. Use False if there are any problems.
error_message (Optional[str]): Required if ok is False. Error message in human readable
form that explains the reason for failure to proceed with the checkout (e.g.
"Sorry, somebody just bought the last of our amazing black T-shirts while you were
busy filling out your payment details. Please choose a different color or
garment!"). Telegram will display this message to the user.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
bool: On success, `True` is returned.
Raises:
:class:`telegram.TelegramError`
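        Example:
            A minimal usage sketch; assumes ``update`` carries a pre-checkout query and ``bot``
            is an initialized :class:`telegram.Bot`::

                bot.answer_pre_checkout_query(update.pre_checkout_query.id, ok=True)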
"""
ok = bool(ok)
if not (ok ^ (error_message is not None)):
raise TelegramError(
'answerPreCheckoutQuery: If ok is True, there should '
'not be error_message; if ok is False, error_message '
'should not be empty')
url_ = '{0}/answerPreCheckoutQuery'.format(self.base_url)
data = {'pre_checkout_query_id': pre_checkout_query_id, 'ok': ok}
if error_message is not None:
data['error_message'] = error_message
result = self._request.post(url_, data, timeout=timeout)
return result
@staticmethod
def de_json(data, bot):
data = super(Bot, Bot).de_json(data, bot)
return Bot(**data)
def to_dict(self):
        data = {'id': self.id, 'username': self.username, 'first_name': self.first_name}
if self.last_name:
data['last_name'] = self.last_name
return data
def __reduce__(self):
return (self.__class__, (self.token, self.base_url.replace(self.token, ''),
self.base_file_url.replace(self.token, '')))
# camelCase aliases
getMe = get_me
sendMessage = send_message
deleteMessage = delete_message
forwardMessage = forward_message
sendPhoto = send_photo
sendAudio = send_audio
sendDocument = send_document
sendSticker = send_sticker
sendVideo = send_video
sendVoice = send_voice
sendVideoNote = send_video_note
sendLocation = send_location
sendVenue = send_venue
sendContact = send_contact
sendGame = send_game
sendChatAction = send_chat_action
answerInlineQuery = answer_inline_query
getUserProfilePhotos = get_user_profile_photos
getFile = get_file
kickChatMember = kick_chat_member
unbanChatMember = unban_chat_member
answerCallbackQuery = answer_callback_query
editMessageText = edit_message_text
editMessageCaption = edit_message_caption
editMessageReplyMarkup = edit_message_reply_markup
getUpdates = get_updates
setWebhook = set_webhook
deleteWebhook = delete_webhook
leaveChat = leave_chat
getChat = get_chat
getChatAdministrators = get_chat_administrators
getChatMember = get_chat_member
getChatMembersCount = get_chat_members_count
getWebhookInfo = get_webhook_info
setGameScore = set_game_score
getGameHighScores = get_game_high_scores
sendInvoice = send_invoice
answerShippingQuery = answer_shipping_query
answerPreCheckoutQuery = answer_pre_checkout_query
| [
"[email protected]"
] | |
1ef5a65135c034f3e78359e2d7b635ff06eb63f5 | a884039e1a8b0ab516b80c2186e0e3bad28d5147 | /Livros/Livro-Introdução à Programação-Python/Capitulo 10/Exemplos/nome.py | 08708c5df927089c0f4d9b9c55738c7715ea25a2 | [
"MIT"
] | permissive | ramonvaleriano/python- | 6e744e8bcd58d07f05cd31d42a5092e58091e9f0 | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | refs/heads/main | 2023-04-10T14:04:24.497256 | 2021-04-22T18:49:11 | 2021-04-22T18:49:11 | 340,360,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # Program: nome.py
# Author: Ramon R. Valeriano
# Description:
# Developed: 28/02/2020 - 11:28
class Nome:
def __init__(self, nome):
        if nome is None or not nome.strip():
            raise ValueError('Name cannot be null or blank.')
self.nome = nome
self.chave = nome.strip().lower()
def __str__(self):
return self.nome
def __repr__(self):
        return f'<Class {type(self).__name__} at 0x{id(self):x} Name: {self.nome}>'
def __eq__(self, outro):
        print('__eq__ called')
return self.nome == outro
def __lt__(self, outro):
        print('__lt__ called')
return self.nome < outro | [
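# Usage sketch (hypothetical values):
#   n = Nome('Ada Lovelace')
#   print(n)                 # Ada Lovelace
#   n == 'Ada Lovelace'      # True, via __eq__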
"[email protected]"
] | |
d9abb2e4a97bc4acab4889f0068a81752db2542f | 2486e0cc147230a5d69c6d052217b9f3c5a4d1a8 | /Bindings/Python/setup.py.in | d3da821e0d0ecbd48f6d71c80c0154b247cb4a75 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bit20090138/opensim-core | 8b68e13a2e5e0e538651c3f7940d8bed7a8a4fe3 | de812be879d7271be92d71ac01c689a3b29e4629 | refs/heads/master | 2021-01-18T05:13:41.479462 | 2016-04-29T00:12:56 | 2016-04-29T00:12:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | in | #!/usr/bin/env python
import os
from setuptools import setup
setup(name='opensim',
version='@OPENSIM_VERSION@',
description='OpenSim Simulation Framework',
author='OpenSim Team',
author_email='[email protected]',
url='http://opensim.stanford.edu/',
license='Apache 2.0',
packages=['opensim'],
package_data={'opensim': ['_*.*']},
include_package_data=True,
classifiers=[
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Physics',
],
)
| [
"[email protected]"
] | |
9513a2411bfa39e1bbf4be4a084440f59c0b600b | a752920841038f1f84df06779ff041d6c1100697 | /pypinyin/contrib/neutral_tone.pyi | 774407edb8eff8fa0f780e5f8c4d3300f2351d42 | [
"MIT"
] | permissive | mozillazg/python-pinyin | 06e5eaa5326b642d50aacbe71b7117ac6024b353 | 6a306a6ec0148502ae4e689a229340555ecb6333 | refs/heads/master | 2023-08-31T14:13:44.512972 | 2023-05-14T12:18:47 | 2023-05-14T12:18:47 | 12,830,126 | 4,564 | 634 | MIT | 2023-09-09T03:46:41 | 2013-09-14T14:01:40 | Python | UTF-8 | Python | false | false | 518 | pyi | # -*- coding: utf-8 -*-
from typing import Any
from typing import Optional
from typing import Text
from typing import Tuple
from pypinyin.constants import Style
TStyle = Style
class NeutralToneWith5Mixin(object):
NUMBER_TONE = ... # type: Tuple[TStyle]
NUMBER_AT_END = ... # type: Tuple[TStyle]
def post_convert_style(self, han: Text, orig_pinyin: Text,
converted_pinyin: Text, style: TStyle,
strict: bool, **kwargs: Any) -> Optional[Text]: ...
| [
"[email protected]"
] | |
860fc2572ff197af9c82e05aa40b80bb2e6c03c2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02889/s701044419.py | 15a300e4e766231cfc0aae20a07a2905af0d690c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | import sys
input = sys.stdin.readline
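# calc() is an in-place Floyd-Warshall relaxation over the N x N matrix g:
# after the triple loop, g[i][j] holds the shortest-path cost from i to j.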
def calc(N, g):
for k in range(N):
for i in range(N):
for j in range(N):
g[i][j] = min(g[i][j], g[i][k] + g[k][j])
def main():
N, M, L = map(int, input().split())
adj = [{} for _ in range(N)]
for _ in range(M):
A, B, C = map(int, input().split())
adj[A-1][B-1] = C
adj[B-1][A-1] = C
dst = [[float("inf")] * N for _ in range(N)]
for i in range(N):
dst[i][i] = 0
for j in adj[i]:
dst[i][j] = adj[i][j]
calc(N, dst)
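    # Build a second graph where an edge means "reachable on one tank of fuel"
    # (shortest distance <= L); its shortest paths count road segments, i.e.
    # refuels + 1, which is why the answer printed below is x - 1.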
ans = [[float("inf")] * N for _ in range(N)]
for i in range(N):
ans[i][i] = 0
for j in range(i+1, N):
if dst[i][j] <= L:
ans[i][j] = 1
ans[j][i] = 1
calc(N, ans)
Q = int(input())
for _ in range(Q):
s, t = map(int, input().split())
x = ans[s-1][t-1]
print(-1 if x == float("inf") else x-1)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
7729b8232868cb134a2e295ce3058f8047fe5360 | f24050f9be7f089ebe97857b4d6dc67a7dda17a8 | /poetry/pozzi/python/lite_call_runtime_top.py | ce7c56220fe7521b3d4e5f2d6867c1d30028cf4b | [] | no_license | ntsourakis/regulus-python | 63bffdfbf5ba5c09e60e3d729d310edfd961d79b | 91830264e0476ccaaf7ccec83e8bb8ca32a9a4fe | refs/heads/master | 2020-04-24T10:33:49.220740 | 2019-04-29T17:11:33 | 2019-04-29T17:11:33 | 171,897,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,628 | py | #!/usr/bin/python
import pozzi.python.lite_call_runtime as call_main
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
# Use this to load table file from canonical place in zipfile (speech interface)
def init():
TableFile = 'call_tables.data.gz'
MatchingFile = 'robust_matching_tables.data.gz'
return call_main.init_state(TableFile, MatchingFile)
# Use this to load table file from canonical place in zipfile (web-server interface)
def init_basic():
TableFile = dir_path + '/call_tables.data.gz'
MatchingFile = dir_path + '/robust_matching_tables.data.gz'
return call_main.init_state_basic(TableFile, MatchingFile)
# Top-level call for Alexa version: string to string
def string_and_state_to_action(String, State):
return call_main.string_and_state_to_action_main(String, State)
# Top-level call for web-server version: json to json
def message_and_state_to_message(Message, State):
return call_main.process_call_message(Message, State)
# Top-level call for doing robust matching (either version)
def robust_match(String, State, N):
return call_main.robust_match_string(String, State, N)
# Convenient for testing on local machine (Alexa apps)
def init_local(Dir0):
LocalCompiledDir = 'c:/cygwin64/home/speech/reguluslitecontent-svn/trunk/litecontent/alexa_content/compiled/'
Dir = LocalCompiledDir + Dir0 + '/'
TableFile = Dir + 'call_tables.data.gz'
MatchingFile = Dir + 'robust_matching_tables.data.gz'
return call_main.init_state(TableFile, MatchingFile)
# Possible values:
# 'quel_animal'
# 'zahlenspiel'
# 'welches_tier'
# 'number_game'
# 'which_language'
# 'which_movie'
# 'jeu_de_chiffres'
# 'quelle_langue'
# Convenient for testing on local machine (web-server apps)
def init_dante():
Dir = 'c:/cygwin64/home/speech/reguluslitecontent-svn/trunk/litecontent/alexa_content/compiled/dante/'
TableFile = Dir + 'call_tables.data.gz'
return call_main.init_state_basic(TableFile)
# import lite_call_runtime_top as call
# (State, Init, Bad) = call.init_local('quelle_langue')
# call.string_and_state_to_action('aide', State)
# call.robust_match('vassili', State, 2)
# State = call.init_dante()
# call.message_and_state_to_message(['get_available_lessons'], State)
# call.message_and_state_to_message(['set_lesson_by_name', 'Inferno I 1-30'], State)
# call.message_and_state_to_message(['help_file'], State)
# call.message_and_state_to_message(['spoken_help'], State)
# call.message_and_state_to_message(['match', 'mi ritrovai per una selva oscura'], State)
| [
"[email protected]"
] | |
5101714fc9c01a1ff9534e8afcec0f66f825348c | 742956eb16ebc9ec802929a3ffde7377bbdd461f | /hackbright.py | 1ab453681028f9eded0400da2822ef13a84ab553 | [] | no_license | mashikro/hb-project-tracker-flask | bb7a964d1d1be90f2f6a608f84766ad4783ad87e | 7a3fc23324d9ab2de3b3c8fc72164f32e8a6494f | refs/heads/master | 2021-06-24T14:39:31.201648 | 2019-10-24T00:53:45 | 2019-10-24T00:53:45 | 217,180,943 | 0 | 0 | null | 2021-03-20T02:01:22 | 2019-10-24T00:54:35 | Python | UTF-8 | Python | false | false | 5,311 | py | """Hackbright Project Tracker.
A front-end for a database that allows users to work with students, class
projects, and the grades students receive in class projects.
"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy()
def connect_to_db(app):
"""Connect the database to our Flask app."""
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///hackbright'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.app = app
db.init_app(app)
def get_student_by_github(github):
"""Given a GitHub account name, print info about the matching student."""
QUERY = """
SELECT first_name, last_name, github
FROM students
WHERE github = :github
"""
db_cursor = db.session.execute(QUERY, {'github': github})
row = db_cursor.fetchone()
print(f"Student: {row[0]} {row[1]}\nGitHub account: {row[2]}")
return row #returns a tuple of (first name, last name, github)
def make_new_student(first_name, last_name, github):
"""Add a new student and print confirmation.
Given a first name, last name, and GitHub account, add student to the
database and print a confirmation message.
"""
QUERY = """
INSERT INTO students (first_name, last_name, github)
VALUES (:first_name, :last_name, :github)
"""
db.session.execute(QUERY, {'first_name': first_name,
'last_name': last_name,
'github': github})
db.session.commit()
print(f"Successfully added student: {first_name} {last_name}")
def get_project_by_title(title):
"""Given a project title, print information about the project."""
QUERY = """
SELECT title, description, max_grade
FROM projects
WHERE title = :title
"""
db_cursor = db.session.execute(QUERY, {'title': title})
row = db_cursor.fetchone()
print(f"Title: {row[0]}\nDescription: {row[1]}\nMax Grade: {row[2]}")
return row
def get_grade_by_github_title(github, title):
"""Print grade student received for a project."""
QUERY = """
SELECT grade
FROM grades
WHERE student_github = :github
AND project_title = :title
"""
db_cursor = db.session.execute(QUERY, {'github': github, 'title': title})
row = db_cursor.fetchone()
print(f"Student {github} in project {title} received grade of {row[0]}")
return row
def assign_grade(github, title, grade):
"""Assign a student a grade on an assignment and print a confirmation."""
QUERY = """
INSERT INTO grades (student_github, project_title, grade)
VALUES (:github, :title, :grade)
"""
db_cursor = db.session.execute(QUERY, {'github': github,
'title': title,
'grade': grade})
db.session.commit()
print(f"Successfully assigned grade of {grade} for {github} in {title}")
def get_grades_by_github(github):
"""Get a list of all grades for a student by their github username"""
QUERY = """
SELECT project_title, grade
FROM grades
WHERE student_github = :github
"""
db_cursor = db.session.execute(QUERY, {'github': github})
rows = db_cursor.fetchall()
for row in rows:
print(f"Student {github} received grade of {row[1]} for {row[0]}")
return rows
def get_grades_by_title(title):
"""Get a list of all student grades for a project by its title"""
QUERY = """
SELECT student_github, grade
FROM grades
WHERE project_title = :title
"""
db_cursor = db.session.execute(QUERY, {'title': title})
rows = db_cursor.fetchall()
for row in rows:
print(f"Student {row[0]} received grade of {row[1]} for {title}")
return rows
def handle_input():
"""Main loop.
Repeatedly prompt for commands, performing them, until 'quit' is received
as a command.
"""
command = None
while command != "quit":
input_string = input("HBA Database> ")
tokens = input_string.split()
command = tokens[0]
args = tokens[1:]
if command == "student":
github = args[0]
get_student_by_github(github)
elif command == "new_student":
first_name, last_name, github = args # unpack!
make_new_student(first_name, last_name, github)
elif command == "project":
title = args[0]
get_project_by_title(title)
elif command == "grade":
github, title = args
get_grade_by_github_title(github, title)
elif command == "assign_grade":
github, title, grade = args
assign_grade(github, title, grade)
elif command == "student_grades":
github = args[0]
get_grades_by_github(github)
elif command == "project_grades":
title = args[0]
get_grades_by_title(title)
if __name__ == "__main__":
connect_to_db(app)
handle_input()
# To be tidy, we'll close our database connection -- though, since this
# is where our program ends, we'd quit anyway.
db.session.close()
| [
"[email protected]"
] | |
03695b9c55d45f662abeee3299b7e113eb881646 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinci_v39r1/tuplemaking/threesamesignmuondata/B23MuNuSignalDataTest.py | 0620ed3ebfbd3c78a4c2ec7e02684fa6c6dbcdaf | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,337 | py | # $Id: $
# Test your line(s) of the stripping
#
# NOTE: Please make a copy of this file for your testing, and do NOT change this one!
#
from Gaudi.Configuration import *
from Configurables import DaVinci
line = 'B23MuNu_TriMuLine'
location = '/Event/Semileptonic/Phys/B23MuNu_TriMuLine/Particles'
#from Configurables import DaVinci, PrintDecayTree
#pt = PrintDecayTree(Inputs = [ location ])
#DaVinci().appendToMainSequence( [ pt ] )
######### Refining the candidate
# get classes to build the SelectionSequence
from PhysSelPython.Wrappers import AutomaticData, Selection, SelectionSequence
# Get the Candidates from the DST. AutomaticData is for data on the DST
TriMuSel = AutomaticData(Location = location)
TriMuSeq = SelectionSequence('SeqTriMu',
TopSelection = TriMuSel,
)
DaVinci().appendToMainSequence( [ TriMuSeq.sequence() ] )
# DecayTreeTuple constructor
from Configurables import DecayTreeTuple
#from DecayTreeTuple.Configuration import *
from Configurables import DecayTreeTuple, FitDecayTrees, TupleToolRecoStats, TupleToolTrigger, TupleToolTISTOS, CondDB, SelDSTWriter
#from DecayTreeTuple.Configuration import *
from Configurables import DecayTreeTuple, FilterDesktop,CombineParticles,FitDecayTrees, TupleToolRecoStats, TupleToolTrigger, TupleToolTISTOS, CondDB
from DecayTreeTuple.Configuration import *
tuple = DecayTreeTuple("B_Tuple")
#tuple.Inputs = [ TriMuSeq.outputLocation() ]
tuple.Inputs = ["/Event/Semileptonic/Phys/B23MuNu_TriMuLine/Particles"]
#tuple.Inputs = ["Phys/DecayTreeFitterB"]
#tuple.ToolList = [
# "TupleToolKinematic"
# , "TupleToolEventInfo"
# , "TupleToolRecoStats"
# , "TupleToolMCTruth"
# , "TupleToolMCBackgroundInfo"
#]
tuple.ToolList = [
"TupleToolKinematic",
"TupleToolEventInfo",
"TupleToolRecoStats",
"TupleToolPid"
]
tuple.addBranches({ # remove all "^" except where needed.
"Bplus" : "^([B+ -> mu+ mu- mu+]CC)",
"mu1" : "[B+ -> ^mu+ mu- mu+]CC ",
"mu2" : "[B+ -> mu+ ^mu- mu+]CC ",
"mu3" : "[B+ -> mu+ mu- ^mu+]CC ",
})
tuple.Bplus.ToolList += [ "TupleToolTISTOS" ]
tuple.Bplus.addTool( TupleToolTISTOS, name = "TupleToolTISTOS" )
tuple.Bplus.TupleToolTISTOS.Verbose = True
tuple.Bplus.TupleToolTISTOS.TriggerList = [
"L0DiMuonDecision"
, "L0MuonDecision"
, "L0HadronDecision"
, "Hlt1TrackAllL0Decision"
, "Hlt1TrackMuonDecision"
, "Hlt1DiMuonHighMassDecision"
, "Hlt1SingleMuonHighPTDecision"
, "Hlt2TopoMu2BodyBBDTDecision"
, "Hlt2TopoMu3BodyBBDTDecision"
, "Hlt2Topo2BodyBBDTDecision"
, "Hlt2Topo3BodyBBDTDecision"
, "Hlt2DiMuonDetachedJPsiDecision"
, "Hlt2DiMuonDetachedDecision"
, "Hlt2SingleMuonDecision"
, "Hlt2DiMuonDetachedHeavyDecision"
]
LoKi_All=tuple.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_All")
LoKi_All.Variables = {
'MINIPCHI2' : "MIPCHI2DV(PRIMARY)",
'MINIP' : "MIPDV(PRIMARY)",
'ETA' : 'ETA',
'PHI' : 'PHI'
}
LoKi_Bplus=tuple.Bplus.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_Bplus")
LoKi_Bplus.Variables = {
'TAU' : "BPVLTIME()",
'DIRA_OWNPV' : "BPVDIRA",
'FD_CHI2' : "BPVVDCHI2",
'ENDVERTEX_CHI2' : "VFASPF(VCHI2/VDOF)",
'X_travelled' : "VFASPF(VX)-BPV(VX)",
'Y_travelled' : "VFASPF(VY)-BPV(VY)",
'Z_travelled' : "VFASPF(VZ)-BPV(VZ)",
'P_Parallel' : "BPVDIRA*P",
'P_Perp' : "sin(acos(BPVDIRA))*P",
'BPVVDZ' : "BPVVDZ",
'Corrected_Mass' : "BPVCORRM"
}
LoKi_mu1=tuple.mu1.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu1")
LoKi_mu1.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
LoKi_mu2=tuple.mu2.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu2")
LoKi_mu2.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
LoKi_mu3=tuple.mu3.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu3")
LoKi_mu3.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
tuple.Decay = "[B+ -> ^mu+ ^mu- ^mu+]CC"
from Configurables import DaVinci
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexDatappMuMu
tuple.mu1.addTool(TupleToolVertexDatappMuMu)
tuple.mu1.ToolList+=["TupleToolVertexDatappMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexDatapmMuMu
tuple.mu1.addTool(TupleToolVertexDatapmMuMu)
tuple.mu1.ToolList+=["TupleToolVertexDatapmMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexDatampMuMu
tuple.mu1.addTool(TupleToolVertexDatampMuMu)
tuple.mu1.ToolList+=["TupleToolVertexDatampMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexDataMuMuMu
tuple.mu1.addTool(TupleToolVertexDataMuMuMu)
tuple.mu1.ToolList+=["TupleToolVertexDataMuMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolSallyvs3
tuple.Bplus.addTool(TupleToolSallyvs3)
tuple.Bplus.ToolList+=["TupleToolSallyvs3"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolApplypMuIsolation
tuple.Bplus.addTool(TupleToolApplypMuIsolation)
tuple.Bplus.TupleToolApplypMuIsolation.OutputSuffix="_weights"
tuple.Bplus.TupleToolApplypMuIsolation.WeightsFile="weights_110614_Lc_pX.xml"
tuple.Bplus.ToolList+=["TupleToolApplypMuIsolation"]
#Mysterious things to make isolation work
name="TupleToolApplypMuIsolation"
from Configurables import ChargedProtoParticleMaker
#
veloprotos = ChargedProtoParticleMaker(name+"ProtoPMaker")
veloprotos.Inputs = ["Rec/Track/Best"]
veloprotos.Output = "Rec/ProtoP/myProtoPMaker/ProtoParticles"
#
DaVinci().appendToMainSequence( [ veloprotos ])
#
from Gaudi.Configuration import *
from Configurables import ProtoParticleCALOFilter, CombinedParticleMaker,NoPIDsParticleMaker
from CommonParticles.Utils import *
#
algorithm = NoPIDsParticleMaker('StdNoPIDsVeloPions', Particle = 'pion', )
algorithm.Input = "Rec/ProtoP/myProtoPMaker/ProtoParticles"
selector = trackSelector ( algorithm , trackTypes = ['Velo'] )
#
locations = updateDoD ( algorithm )
DaVinci().appendToMainSequence( [ algorithm ])
#
from Configurables import GaudiSequencer
MySequencer = GaudiSequencer('Sequence')
#DaVinci().HistogramFile = 'DV_stripping_histosnew2.root'
DaVinci().HistogramFile = 'DVHistosignal.root'
DaVinci().TupleFile = "DVTuplesignal.root"
DaVinci().EvtMax = 10000
DaVinci().PrintFreq = 2000
#DaVinci().appendToMainSequence( [ MySequencer ] )
DaVinci().appendToMainSequence( [ tuple] )
#DaVinci().appendToMainSequence( [ sr ] )
#DaVinci().appendToMainSequence( [ ac ] )
DaVinci().DataType = "2012"
DaVinci().InputType = "DST"
# change the column size of timing table
from Configurables import TimingAuditor, SequencerTimerTool
TimingAuditor().addTool(SequencerTimerTool,name="TIMER")
TimingAuditor().TIMER.NameSize = 60
#NTupleSvc().Output = ["FILE1 DATAFILE='trythis.root' TYP='ROOT' OPT='NEW'"]
MessageSvc().Format = "% F%60W%S%7W%R%T %0W%M"
# database
DaVinci().DDDBtag = "dddb-20120831"
DaVinci().CondDBtag = "cond-20121008"
DaVinci().Lumi = True
# input file
# importOptions("$STRIPPINGSELECTIONSROOT/tests/data/Reco14_Run125113.py")
| [
"[email protected]"
] | |
b8db2d856d22439d7469fcfed29803ac47f6a361 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /Others/past/past202004-open/n.py | f5270ea558cfda85c4314921ba5f9252f01ad907 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 134 | py | # -*- coding: utf-8 -*-
def main():
import sys
input = sys.stdin.readline
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d84017fd7fe042e521b48cd24401a9e9513723e5 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/12165092.py | 401f428ce7c24c5fb7a3cddf1f6b4d44312d9daa | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/12165092.py generated: Wed, 25 Jan 2017 15:25:34
#
# Event Type: 12165092
#
# ASCII decay Descriptor: [B+ -> (D+ => K- pi+ pi+) K+ pi- ]cc
#
from Configurables import Generation
Generation().EventType = 12165092
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_D+K+pi-,Kpipi=sqDalitz.dec"
Generation().SignalRepeatedHadronization.CutTool = ""
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = ""
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12165092
| [
"[email protected]"
] | |
a43676dc807faaf5ff96bb4b2e5d3f8aee04c123 | 9cc3135d5fcd781c0542a905c61dc19b0ceeffef | /file_reader_line.py | 66d355dba2173774df41579cbc1cc0eb3dafa21e | [] | no_license | bkalcho/python-crash-course | 411d8af223fb6974d4f890c0f82c9e56b062359c | 8425649a2ecd5abeeb438e816400f270d937758e | refs/heads/master | 2022-09-11T13:47:56.837256 | 2022-08-23T10:04:35 | 2022-08-23T10:04:35 | 69,810,386 | 14 | 8 | null | 2022-08-23T10:04:36 | 2016-10-02T17:14:41 | Python | UTF-8 | Python | false | false | 250 | py | # Author: Bojan G. Kalicanin
# Date: 05-Oct-2016
# Program that reads a file line by line and prints each line
# to stdout
filename = 'pi_digits.txt'
with open(filename) as file_object:
for line in file_object:
        print(line.rstrip())
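# A related sketch (assumes the file fits comfortably in memory): the same
# read done in one pass into a list, handy when the lines are reused later.
# with open(filename) as file_object:
#     lines = [line.rstrip() for line in file_object]
# print(lines)
| [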
"[email protected]"
] | |
0316e6973b49d6d7e409eef2969a5e273989a715 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03437/s534128723.py | 39f835745095b832d314d35d1a94be11cc79a200 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from sys import stdin
import fractions
def lcm(x, y):
return (x * y) // fractions.gcd(x, y)
n,m = [int(x) for x in stdin.readline().rstrip().split()]
if lcm(n,m) == n:
print(-1)
else:
print(n)
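# Equivalent check, as a sketch: lcm(n, m) == n holds exactly when m divides n,
# so the same answer can be computed without fractions.gcd (removed in Python 3.9):
# print(-1 if n % m == 0 else n)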
| [
"[email protected]"
] | |
473f81fd11b029ce5acfd36114b5f5d320f145cd | 6c5daf5133656a33574dc2f5b62b9f1a1bdf1390 | /Elec Power Chongqing/2021/old/dataForecasting.py | fdbd4edc3a5ba2ca8971ae189ad93166be7ca73e | [] | no_license | RobinChen121/Python-Practices | 6c10b721dce3a8d2b76e190959d0940c52f0d1cc | 85bd9ad30c245dd62dc7ea837f964eaecbe24ed9 | refs/heads/master | 2023-08-31T10:08:01.613828 | 2023-08-27T14:51:46 | 2023-08-27T14:51:46 | 142,564,793 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,716 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 15 14:41:47 2021
@author: zhen chen
MIT Licence.
Python version: 3.8
Description: forecast based on the last 12 months
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf # pacf: partial autocorrelation function
sns.set()
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
ini_data = pd.read_excel(r'sumCleanData.xlsx')
ini_data['日期'] = pd.to_datetime(ini_data['日期']).dt.strftime('%y-%m-%d')
group_data = ini_data[['日期', 'sum']].groupby('日期').sum()
group_data.plot(kind = 'line', title = 'Total electricity consumption of all users')
result = adfuller(group_data.values)
print('ADF Statistic: %f' % result[0]) # ADF tests stationarity; if non-stationary, difference until stationary
print('p-value: %f' % result[1])
# The differencing order can also be chosen from the ACF of the differenced series
# Choose differencing order d (d = 1) and moving-average order q (q = 0)
# Original Series
fig, axes = plt.subplots(3, 2)
axes[0, 0].plot(group_data.values);
axes[0, 0].set_title('Original data')
plot_acf(group_data.values, ax=axes[0, 1], title = 'Autocorrelation')
# 1st Differencing
axes[1, 0].plot(group_data.diff().dropna().values);
axes[1, 0].set_title('First difference')
plot_acf(group_data.diff().dropna().values, ax=axes[1, 1], title = 'Autocorrelation')
# 2nd Differencing
axes[2, 0].plot(group_data.diff(2).dropna().values);
axes[2, 0].set_title('Second difference')
plot_acf(group_data.diff(2).dropna().values, ax=axes[2, 1], title = 'Autocorrelation')
plt.setp(plt.gcf().get_axes(), xticks=[]); # gcf: get current figure
plt.show()
# print()
# result = adfuller(group_data.diff().dropna().values)
# print('ADF Statistic: %f' % result[0])
# print('p-value: %f' % result[1])
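# A small helper, added here as a sketch (not part of the original flow):
# pick the differencing order d by re-running the ADF test until p < alpha.
def choose_diff_order(series, max_d=3, alpha=0.05):
    diffed = series
    for d in range(max_d + 1):
        if adfuller(diffed.dropna().values.ravel())[1] < alpha:
            return d
        diffed = diffed.diff()
    return max_d
# print('suggested d:', choose_diff_order(group_data))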
# Choose the autoregressive order p (p = 1)
fig, axes = plt.subplots(1, 2)
axes[0].plot(group_data.diff().dropna().values); axes[0].set_title('1st Differencing')
plot_pacf(group_data.diff().dropna().values, ax=axes[1])
plt.show()
# build ARIMA Model
model = sm.tsa.ARIMA(group_data.values, order=(1,1,1))
model_fit = model.fit()
print(model_fit.summary())
# # Plot residual errors
# residuals = pd.DataFrame(model_fit.resid)
# fig, ax = plt.subplots(1,2)
# residuals.plot(title="Residuals", ax=ax[0])
# residuals.plot(kind='kde', title='Density', ax=ax[1]) # density plot; KDE: Kernel Density Estimate
# plt.show()
# Actual vs Fitted
plt.figure()
predict = model_fit.predict(start=1, end = 380)
plt.plot(range(366), group_data['sum'], 'm', label = 'Original data')
plt.plot(range(380), predict, 'r:', label = 'Forecast')
plt.legend()
plt.show()
plt.savefig('forecast.png', dpi=1000)
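# A rough holdout check, left as a commented sketch because the forecast API
# differs across statsmodels versions (assumes a 14-day test tail):
# train = group_data['sum'].values[:-14]
# fit = sm.tsa.ARIMA(train, order=(1, 1, 1)).fit()
# fc = fit.forecast(steps=14)
# print('MAE: %.2f' % abs(fc - group_data['sum'].values[-14:]).mean())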
# df1 = ini_data[ini_data.户号对应 == 1]
# df1.plot(x = '日期', y = 'sum', kind = 'line', title = 'Historical consumption of user 1')
# # df2 = ini_data[ini_data.户号对应 == 2]
# # df2.plot(x = '日期', y = 'sum', kind = 'line', title = 'Historical consumption of user 2')
# df3 = ini_data[ini_data.户号对应 == 3]
# df3.plot(x = '日期', y = 'sum', kind = 'line', title = 'Historical consumption of user 3')
# df4 = ini_data[ini_data.户号对应 == 4]
# df4.plot(x = '日期', y = 'sum', kind = 'line', title = 'Historical consumption of user 4')
# df5 = ini_data[ini_data.户号对应 == 5]
# df5.plot(x = '日期', y = 'sum', kind = 'line', title = 'Historical consumption of user 5')
# df6 = ini_data[ini_data.户号对应 == 6]
# df6.plot(x = '日期', y = 'sum', kind = 'line', title = 'Historical consumption of user 6')
# df8 = ini_data[ini_data.户号对应 == 8]
# df8.plot(x = '日期', y = 'sum', kind = 'line', title = 'Historical consumption of user 8')
| [
"[email protected]"
] | |
7f4af42352a202cad7474c629138bcd8f86c4a5d | 46e271e27afe50b8b62be0651d78164490911bb3 | /ws_moveit/src/pkg_task3/scripts/process_packagen2.py | 4a895d52be808e3673dd1eeae7d97e3a71b7d4dd | [] | no_license | Nidhiks2000/Vargi-bot | 8a43af1e470b6fc84d468003f67471a1e1f47aad | 3e2e7be310ed7372cb6960eea8faabec75d9fbcf | refs/heads/master | 2023-07-24T01:05:10.049800 | 2021-09-08T16:31:08 | 2021-09-08T16:31:08 | 403,935,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,887 | py | #!/usr/bin/env python
'''We have used three Python scripts to implement this task, where each script takes care of one coloured package.
We chose this approach considering the computation time and ease of debugging.
'''
# This python script processes packagen2
# ROS- PROCESS_PACKAGEN2 SCRIPT
import rospy
import sys
import copy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import actionlib
import math
import tf2_ros
import tf2_msgs.msg
import std_msgs.msg
from hrwros_gazebo.msg import LogicalCameraImage #to use logical camera feed
from pkg_vb_sim.srv import vacuumGripper #to activate vaccum gripper
from pkg_vb_sim.srv import conveyorBeltPowerMsg #to activate conveyor belt
class CartesianPath:
#Constructor
def __init__(self):
rospy.init_node('process_packagen2',anonymous = True)
self._planning_group = "ur5_1_planning_group"
self._commander = moveit_commander.roscpp_initialize(sys.argv)
self._robot = moveit_commander.RobotCommander()
self._scene = moveit_commander.PlanningSceneInterface()
self._group = moveit_commander.MoveGroupCommander(self._planning_group)
self._display_trajectory_publisher = rospy.Publisher(
'/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory, queue_size=1)
self._exectute_trajectory_client = actionlib.SimpleActionClient(
'execute_trajectory', moveit_msgs.msg.ExecuteTrajectoryAction)
self._exectute_trajectory_client.wait_for_server()
self._planning_frame = self._group.get_planning_frame()
self._eef_link = self._group.get_end_effector_link()
self._group_names = self._robot.get_group_names()
self._tfBuffer = tf2_ros.Buffer()
self._listener = tf2_ros.TransformListener(self._tfBuffer)
self.box_name = "packagen2"
rospy.loginfo('\033[94m' + " >>> Init done." + '\033[0m')
def ee_cartesian_translation(self, trans_x, trans_y, trans_z):
# 1. Create a empty list to hold waypoints
waypoints = []
# 2. Add Current Pose to the list of waypoints
waypoints.append(self._group.get_current_pose().pose)
# 3. Create a New waypoint
wpose = geometry_msgs.msg.Pose()
wpose.position.x = waypoints[0].position.x + (trans_x)
wpose.position.y = waypoints[0].position.y + (trans_y)
wpose.position.z = waypoints[0].position.z + (trans_z)
# This to keep EE parallel to Ground Plane
wpose.orientation.x = -0.5
wpose.orientation.y = -0.5
wpose.orientation.z = 0.5
wpose.orientation.w = 0.5
# 4. Add the new waypoint to the list of waypoints
waypoints.append(copy.deepcopy(wpose))
# 5. Compute Cartesian Path connecting the waypoints in the list of waypoints
(plan, fraction) = self._group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # Step Size, distance between two adjacent computed waypoints will be 1 cm
0.0) # Jump Threshold
rospy.loginfo("Path computed successfully. Moving the arm.")
num_pts = len(plan.joint_trajectory.points)
if (num_pts >= 3):
del plan.joint_trajectory.points[0]
del plan.joint_trajectory.points[1]
# 6. Make the arm follow the Computed Cartesian Path
self._group.execute(plan)
def go_to_pose(self, arg_pose):
pose_values = self._group.get_current_pose().pose
rospy.loginfo('\033[94m' + ">>> Current Pose:" + '\033[0m')
rospy.loginfo(pose_values)
self._group.set_pose_target(arg_pose)
flag_plan = self._group.go(wait=True) # wait=False for Async Move
pose_values = self._group.get_current_pose().pose
rospy.loginfo('\033[94m' + ">>> Final Pose:" + '\033[0m')
rospy.loginfo(pose_values)
list_joint_values = self._group.get_current_joint_values()
rospy.loginfo('\033[94m' + ">>> Final Joint Values:" + '\033[0m')
rospy.loginfo(list_joint_values)
if (flag_plan == True):
rospy.loginfo(
'\033[94m' + ">>> go_to_pose() Success" + '\033[0m')
else:
rospy.logerr(
'\033[94m' + ">>> go_to_pose() Failed. Solution for Pose not Found." + '\033[0m')
return flag_plan
def set_joint_angles(self, arg_list_joint_angles):
list_joint_values = self._group.get_current_joint_values()
rospy.loginfo('\033[94m' + ">>> Current Joint Values:" + '\033[0m')
rospy.loginfo(list_joint_values)
self._group.set_joint_value_target(arg_list_joint_angles)
self._group.plan()
flag_plan = self._group.go(wait=True)
list_joint_values = self._group.get_current_joint_values()
rospy.loginfo('\033[94m' + ">>> Final Joint Values:" + '\033[0m')
rospy.loginfo(list_joint_values)
pose_values = self._group.get_current_pose().pose
rospy.loginfo('\033[94m' + ">>> Final Pose:" + '\033[0m')
rospy.loginfo(pose_values)
if (flag_plan == True):
rospy.loginfo(
'\033[94m' + ">>> set_joint_angles() Success" + '\033[0m')
else:
rospy.logerr(
'\033[94m' + ">>> set_joint_angles() Failed." + '\033[0m')
return flag_plan
def func_tf(self, arg_frame_1, arg_frame_2):
try:
trans = self._tfBuffer.lookup_transform(arg_frame_1, arg_frame_2, rospy.Time())
#creating a list to add all the transform points with offset
res = []
res.append(trans.transform.translation.x)
res.append(trans.transform.translation.y)
res.append(trans.transform.translation.z)
return res
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.logerr("TF error")
#functions for planning script
def add_box(self,timeout=4):
box_name = self.box_name
scene = self._scene
box_pose = geometry_msgs.msg.PoseStamped()
box_pose.header.frame_id = "world"
box_pose.pose.position.x = -0.62
box_pose.pose.position.y = 0.38
box_pose.pose.position.z = 0.97
scene.add_box(box_name, box_pose, size=(0.15, 0.15, 0.15))
def attach_box(self, timeout=4):
box_name = self.box_name
robot = self._robot
scene = self._scene
eef_link = self._eef_link
group_names = self._group_names
grasping_group = self._group_names
touch_links = robot.get_link_names(group="ur5_1_planning_group")
scene.attach_box(self._eef_link, box_name,touch_links=touch_links)
def detach_box(self, timeout=4):
box_name = self.box_name
scene = self._scene
eef_link = self._eef_link
scene.remove_attached_object(eef_link, name=box_name)
def remove_box(self, timeout=4):
box_name = self.box_name
scene = self._scene
scene.remove_world_object(box_name)
# Destructor
def __del__(self):
moveit_commander.roscpp_shutdown()
rospy.loginfo(
'\033[94m' + "Object of class CartesianPath Deleted." + '\033[0m')
class Services:
def activate_conveyor_belt(self,power):
rospy.wait_for_service("/eyrc/vb/conveyor/set_power")
try:
s = rospy.ServiceProxy("/eyrc/vb/conveyor/set_power",conveyorBeltPowerMsg)
result = s(power)
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
#activating the vaccum gripper by calling a service
def activate_vacuum_gripper(self,result):
rospy.wait_for_service("/eyrc/vb/ur5_1/activate_vacuum_gripper")
try:
s = rospy.ServiceProxy('/eyrc/vb/ur5_1/activate_vacuum_gripper', vacuumGripper)
result = s(result)
print(result)
return result
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
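# The two helpers above follow the standard rospy service-call pattern; as a
# sketch with a hypothetical service name and type:
#
# rospy.wait_for_service('/my/service')             # block until advertised
# proxy = rospy.ServiceProxy('/my/service', SrvType)
# response = proxy(args)                            # call the proxy like a function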
def main():
ur5 = CartesianPath()
service = Services()
reference_frame = "world"
target_frame = "ur5_wrist_3_link"
box_length = 0.15 # Length of the Package
vacuum_gripper_width = 0.115 # Vacuum Gripper Width
delta = vacuum_gripper_width + (box_length/2) # 0.19
# Teams may use this info in Tasks
#to go to green pose
ur5_1_bin_pose = geometry_msgs.msg.Pose()
ur5_1_bin_pose.position.x = 0.637249966166
ur5_1_bin_pose.position.y = -0.0253975214324
ur5_1_bin_pose.position.z = 1.3188059935
ur5_1_bin_pose.orientation.x = 0.0280425654885
ur5_1_bin_pose.orientation.y = 0.0506628496333
ur5_1_bin_pose.orientation.z = 0.316982812125
ur5_1_bin_pose.orientation.w = 0.946661918007
lst_joint_angles = [math.radians(-12.1413692229),
math.radians(-54.3381447029),
math.radians(50.18385848),
math.radians(10.620928083),
math.radians(-49.1547078858),
math.radians(-179.745824573)]
lst_home_pose = [math.radians(7.8433489314),
math.radians(-139.942784159),
math.radians(-58.2911345789),
math.radians (-71.7204516851),
math.radians (89.9713177297),
math.radians (7.90736427846)]
def callback(data):
        # detect packagen2 (guard against frames with fewer than two models)
        if len(data.models) > 1 and data.models[1].type == "packagen2":
ur5.add_box() #adding box to rviz
service.activate_conveyor_belt(9)
res = ur5.func_tf(reference_frame, target_frame)
rospy.loginfo('\033[94m' + "Translating EE to package from current position." + '\033[0m')
#translating the arm position to grab the package
ur5.ee_cartesian_translation(0,res[2],0)
ur5.ee_cartesian_translation(res[0]+1.0,0,0) #adding some offset so that it doesn't collide
service.activate_vacuum_gripper(True) #activating the vaccum
ur5.attach_box() #attaching box to the arm in Rviz
            ur5.go_to_pose(ur5_1_bin_pose) # move to the green bin
service.activate_vacuum_gripper(False) #deactivating the vaccum
ur5.detach_box() #dettaching the package from the arm
ur5.remove_box()
ur5.set_joint_angles(lst_home_pose)
service.activate_conveyor_belt(100)
rospy.signal_shutdown("Green process is done")
rospy.Subscriber("/eyrc/vb/logical_camera_2",LogicalCameraImage,callback)
rospy.spin()
if __name__ == '__main__':
main()
| [
"Happysunshine.disroot.org"
] | Happysunshine.disroot.org |
67f5b410a9c362544f83edcb25f34d9f24d4fc1f | c83e356d265a1d294733885c373d0a4c258c2d5e | /mayan/apps/rest_api/urls.py | 43be019b07eb85024ce8607bfd3fde3f44544c10 | [
"Apache-2.0"
] | permissive | TrellixVulnTeam/fall-2021-hw2-451-unavailable-for-legal-reasons_6YX3 | 4160809d2c96707a196b8c94ea9e4df1a119d96a | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | refs/heads/master | 2023-08-21T23:36:41.230179 | 2021-10-02T03:51:12 | 2021-10-02T03:51:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | from django.conf.urls import include, url
from .api_views import (
APIRoot, APIVersionRoot, BrowseableObtainAuthToken,
ProjectInformationAPIView, schema_view
)
from .literals import API_VERSION
api_version_urls = [
url(regex=r'^$', name='api_version_root', view=APIVersionRoot.as_view()),
url(
regex=r'^auth/token/obtain/$', name='auth_token_obtain',
view=BrowseableObtainAuthToken.as_view()
),
url(
regex=r'^project/$', name='project_information',
view=ProjectInformationAPIView.as_view()
)
]
api_urls = [
url(
regex=r'^swagger(?P<format>.json|.yaml)$', name='schema-json',
view=schema_view.without_ui(cache_timeout=None),
),
url(regex=r'^v{}/'.format(API_VERSION), view=include(api_version_urls)),
url(regex=r'^$', name='api_root', view=APIRoot.as_view()),
]
urlpatterns = [
url(
regex=r'^swagger/ui/$', name='schema-swagger-ui',
view=schema_view.with_ui('swagger', cache_timeout=None)
),
url(
regex=r'^redoc/ui/$', name='schema-redoc',
view=schema_view.with_ui('redoc', cache_timeout=None)
),
url(regex=r'^', view=include(api_urls)),
]
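# A small sketch (assumption: this module is included in the project URLconf
# under the namespace 'rest_api') of resolving the named routes above:
#
# from django.urls import reverse
# reverse('rest_api:api_root') # -> mount point of the API root view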
| [
"[email protected]"
] | |
0a2f3cfff69d681b3500ecf3a9d62ad75e684c68 | 431a1f738b1edfba7dad8d10a6b7520d51d917cb | /Samples/UserSamples/2018/VBFConfig.py | 0f6c4d5699d172ffbfdbde1760a8050c5fd41cbf | [] | no_license | aloeliger/DatacardCreator | 5ce702e46fbb77e843b44d8fe088c2645a4a8f66 | 5c7e890276a5be079ed3b677a471c1dcadcba52d | refs/heads/master | 2022-02-26T19:52:30.563747 | 2022-02-16T20:24:48 | 2022-02-16T20:24:48 | 215,602,523 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | from Samples.SampleDefinition import Sample
from Samples.Uncertainties.UserUncertainties.TES import TESUncertainty
from Samples.Uncertainties.UserUncertainties.Signal_JES_18 import JES18Uncertainty
from Samples.Uncertainties.UserUncertainties.JER import JERUncertainty
from Samples.Uncertainties.UserUncertainties.MetRecoil import MetRecoilUncertainty
from Samples.Uncertainties.UserUncertainties.MuonES import MuonESUncertainty
#from Samples.Uncertainties.UserUncertainties.Prefiring import PrefiringUncertainty
from Samples.Uncertainties.UserUncertainties.TauID import TauIDUncertainty
from Samples.Uncertainties.UserUncertainties.Trigger17_18 import Trigger1718Uncertainty
from Samples.Uncertainties.UserUncertainties.qqHTheory import qqHTheoryUncertainty
from Samples.Uncertainties.UserUncertainties.QCDAcceptanceUncertainties.qqH_QCD_AcceptanceUncertainties.qqH_scale_Inclusive_Uncertainty import qqH_scale_Inclusive_Uncertainty
from Samples.EventDefinition.UserEventDictionaries.MuTauEventDictionary import MuTauEventDictionary
VBFSample = Sample()
VBFSample.name = 'qqH_htt125'
VBFSample.path = '/data/aloeliger/SMHTT_Selected_2018_Deep/'
VBFSample.files = ['VBF.root']
VBFSample.definition = ''
VBFSample.uncertainties = [
TESUncertainty(),
JES18Uncertainty(),
JERUncertainty(),
MetRecoilUncertainty(),
MuonESUncertainty(),
# PrefiringUncertainty(),
TauIDUncertainty(),
Trigger1718Uncertainty(),
qqHTheoryUncertainty(),
qqH_scale_Inclusive_Uncertainty(),
]
VBFSample.eventDictionaryInstance = MuTauEventDictionary
VBFSample.CreateEventWeight = VBFSample.CreateEventWeight_Standard
| [
"[email protected]"
] | |
c1cbefd9eb254fbfb66fd091901e1f5ea0bc6655 | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/common/Lib/distutils/tests/test_sdist.py | 4a1af8cd49b957a68f580bed4a2e301705b0a693 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 13,716 | py | # 2017.08.29 21:56:18 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/distutils/tests/test_sdist.py
"""Tests for distutils.command.sdist."""
import os
import tarfile
import unittest
import warnings
import zipfile
from os.path import join
from textwrap import dedent
from test.test_support import captured_stdout, check_warnings, run_unittest
try:
import zlib
except ImportError:
zlib = None
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
from distutils.command.sdist import sdist, show_formats
from distutils.core import Distribution
from distutils.tests.test_config import PyPIRCCommandTestCase
from distutils.errors import DistutilsOptionError
from distutils.spawn import find_executable
from distutils.log import WARN
from distutils.filelist import FileList
from distutils.archive_util import ARCHIVE_FORMATS
SETUP_PY = "\nfrom distutils.core import setup\nimport somecode\n\nsetup(name='fake')\n"
MANIFEST = '# file GENERATED by distutils, do NOT edit\nREADME\nbuildout.cfg\ninroot.txt\nsetup.py\ndata%(sep)sdata.dt\nscripts%(sep)sscript.py\nsome%(sep)sfile.txt\nsome%(sep)sother_file.txt\nsomecode%(sep)s__init__.py\nsomecode%(sep)sdoc.dat\nsomecode%(sep)sdoc.txt\n'
class SDistTestCase(PyPIRCCommandTestCase):
def setUp(self):
super(SDistTestCase, self).setUp()
self.old_path = os.getcwd()
os.mkdir(join(self.tmp_dir, 'somecode'))
os.mkdir(join(self.tmp_dir, 'dist'))
self.write_file((self.tmp_dir, 'README'), 'xxx')
self.write_file((self.tmp_dir, 'somecode', '__init__.py'), '#')
self.write_file((self.tmp_dir, 'setup.py'), SETUP_PY)
os.chdir(self.tmp_dir)
def tearDown(self):
os.chdir(self.old_path)
super(SDistTestCase, self).tearDown()
def get_cmd(self, metadata = None):
"""Returns a cmd"""
if metadata is None:
metadata = {'name': 'fake',
'version': '1.0',
'url': 'xxx',
'author': 'xxx',
'author_email': 'xxx'}
dist = Distribution(metadata)
dist.script_name = 'setup.py'
dist.packages = ['somecode']
dist.include_package_data = True
cmd = sdist(dist)
cmd.dist_dir = 'dist'
return (dist, cmd)
@unittest.skipUnless(zlib, 'requires zlib')
def test_prune_file_list(self):
os.mkdir(join(self.tmp_dir, 'somecode', '.svn'))
self.write_file((self.tmp_dir,
'somecode',
'.svn',
'ok.py'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.hg'))
self.write_file((self.tmp_dir,
'somecode',
'.hg',
'ok'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.git'))
self.write_file((self.tmp_dir,
'somecode',
'.git',
'ok'), 'xxx')
self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx')
dist, cmd = self.get_cmd()
cmd.formats = ['zip']
cmd.ensure_finalized()
cmd.run()
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
self.assertEqual(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
self.assertEqual(len(content), 4)
@unittest.skipUnless(zlib, 'requires zlib')
def test_make_distribution(self):
dist, cmd = self.get_cmd()
cmd.formats = ['gztar', 'tar']
cmd.ensure_finalized()
cmd.run()
dist_folder = join(self.tmp_dir, 'dist')
result = os.listdir(dist_folder)
result.sort()
self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
os.remove(join(dist_folder, 'fake-1.0.tar'))
os.remove(join(dist_folder, 'fake-1.0.tar.gz'))
cmd.formats = ['tar', 'gztar']
cmd.ensure_finalized()
cmd.run()
result = os.listdir(dist_folder)
result.sort()
self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
@unittest.skipUnless(zlib, 'requires zlib')
def test_unicode_metadata_tgz(self):
"""
Unicode name or version should not break building to tar.gz format.
Reference issue #11638.
"""
dist, cmd = self.get_cmd({'name': u'fake',
'version': u'1.0'})
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
dist_folder = join(self.tmp_dir, 'dist')
result = os.listdir(dist_folder)
self.assertEqual(result, ['fake-1.0.tar.gz'])
os.remove(join(dist_folder, 'fake-1.0.tar.gz'))
@unittest.skipUnless(zlib, 'requires zlib')
def test_add_defaults(self):
dist, cmd = self.get_cmd()
dist.package_data = {'': ['*.cfg', '*.dat'],
'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
self.write_file((self.tmp_dir, 'somecode', 'doc.dat'), '#')
data_dir = join(self.tmp_dir, 'data')
os.mkdir(data_dir)
self.write_file((data_dir, 'data.dt'), '#')
some_dir = join(self.tmp_dir, 'some')
os.mkdir(some_dir)
hg_dir = join(self.tmp_dir, '.hg')
os.mkdir(hg_dir)
self.write_file((hg_dir, 'last-message.txt'), '#')
self.write_file((self.tmp_dir, 'buildout.cfg'), '#')
self.write_file((self.tmp_dir, 'inroot.txt'), '#')
self.write_file((some_dir, 'file.txt'), '#')
self.write_file((some_dir, 'other_file.txt'), '#')
dist.data_files = [('data', ['data/data.dt',
'buildout.cfg',
'inroot.txt',
'notexisting']), 'some/file.txt', 'some/other_file.txt']
script_dir = join(self.tmp_dir, 'scripts')
os.mkdir(script_dir)
self.write_file((script_dir, 'script.py'), '#')
dist.scripts = [join('scripts', 'script.py')]
cmd.formats = ['zip']
cmd.use_defaults = True
cmd.ensure_finalized()
cmd.run()
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
self.assertEqual(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
self.assertEqual(len(content), 12)
f = open(join(self.tmp_dir, 'MANIFEST'))
try:
manifest = f.read()
finally:
f.close()
self.assertEqual(manifest, MANIFEST % {'sep': os.sep})
@unittest.skipUnless(zlib, 'requires zlib')
def test_metadata_check_option(self):
dist, cmd = self.get_cmd(metadata={})
cmd.ensure_finalized()
cmd.run()
warnings = [ msg for msg in self.get_logs(WARN) if msg.startswith('warning: check:') ]
self.assertEqual(len(warnings), 2)
self.clear_logs()
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.metadata_check = 0
cmd.run()
warnings = [ msg for msg in self.get_logs(WARN) if msg.startswith('warning: check:') ]
self.assertEqual(len(warnings), 0)
def test_check_metadata_deprecated(self):
dist, cmd = self.get_cmd()
with check_warnings() as w:
warnings.simplefilter('always')
cmd.check_metadata()
self.assertEqual(len(w.warnings), 1)
def test_show_formats(self):
with captured_stdout() as stdout:
show_formats()
num_formats = len(ARCHIVE_FORMATS.keys())
output = [ line for line in stdout.getvalue().split('\n') if line.strip().startswith('--formats=') ]
self.assertEqual(len(output), num_formats)
def test_finalize_options(self):
dist, cmd = self.get_cmd()
cmd.finalize_options()
self.assertEqual(cmd.manifest, 'MANIFEST')
self.assertEqual(cmd.template, 'MANIFEST.in')
self.assertEqual(cmd.dist_dir, 'dist')
cmd.formats = 1
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.formats = ['zip']
cmd.finalize_options()
cmd.formats = 'supazipa'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
@unittest.skipUnless(zlib, 'requires zlib')
@unittest.skipUnless(UID_GID_SUPPORT, 'Requires grp and pwd support')
@unittest.skipIf(find_executable('tar') is None, 'The tar command is not found')
@unittest.skipIf(find_executable('gzip') is None, 'The gzip command is not found')
def test_make_distribution_owner_group(self):
dist, cmd = self.get_cmd()
cmd.formats = ['gztar']
cmd.owner = pwd.getpwuid(0)[0]
cmd.group = grp.getgrgid(0)[0]
cmd.ensure_finalized()
cmd.run()
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
dist, cmd = self.get_cmd()
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, os.getuid())
finally:
archive.close()
def _check_template(self, content):
dist, cmd = self.get_cmd()
os.chdir(self.tmp_dir)
self.write_file('MANIFEST.in', content)
cmd.ensure_finalized()
cmd.filelist = FileList()
cmd.read_template()
warnings = self.get_logs(WARN)
self.assertEqual(len(warnings), 1)
def test_invalid_template_unknown_command(self):
self._check_template('taunt knights *')
def test_invalid_template_wrong_arguments(self):
self._check_template('prune')
@unittest.skipIf(os.name != 'nt', 'test relevant for Windows only')
def test_invalid_template_wrong_path(self):
self._check_template('include examples/')
@unittest.skipUnless(zlib, 'requires zlib')
def test_get_file_list(self):
dist, cmd = self.get_cmd()
dist.package_data = {'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest = [ line.strip() for line in f.read().split('\n') if line.strip() != '' ]
finally:
f.close()
self.assertEqual(len(manifest), 5)
self.write_file((self.tmp_dir, 'somecode', 'doc2.txt'), '#')
build_py = dist.get_command_obj('build_py')
build_py.finalized = False
build_py.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest2 = [ line.strip() for line in f.read().split('\n') if line.strip() != '' ]
finally:
f.close()
self.assertEqual(len(manifest2), 6)
self.assertIn('doc2.txt', manifest2[-1])
@unittest.skipUnless(zlib, 'requires zlib')
def test_manifest_marker(self):
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest = [ line.strip() for line in f.read().split('\n') if line.strip() != '' ]
finally:
f.close()
self.assertEqual(manifest[0], '# file GENERATED by distutils, do NOT edit')
@unittest.skipUnless(zlib, 'requires zlib')
def test_manifest_comments(self):
contents = dedent(' # bad.py\n #bad.py\n good.py\n ')
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
self.write_file((self.tmp_dir, cmd.manifest), contents)
self.write_file((self.tmp_dir, 'good.py'), '# pick me!')
self.write_file((self.tmp_dir, 'bad.py'), "# don't pick me!")
self.write_file((self.tmp_dir, '#bad.py'), "# don't pick me!")
cmd.run()
self.assertEqual(cmd.filelist.files, ['good.py'])
@unittest.skipUnless(zlib, 'requires zlib')
def test_manual_manifest(self):
dist, cmd = self.get_cmd()
cmd.formats = ['gztar']
cmd.ensure_finalized()
self.write_file((self.tmp_dir, cmd.manifest), 'README.manual')
self.write_file((self.tmp_dir, 'README.manual'), 'This project maintains its MANIFEST file itself.')
cmd.run()
self.assertEqual(cmd.filelist.files, ['README.manual'])
f = open(cmd.manifest)
try:
manifest = [ line.strip() for line in f.read().split('\n') if line.strip() != '' ]
finally:
f.close()
self.assertEqual(manifest, ['README.manual'])
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
filenames = [ tarinfo.name for tarinfo in archive ]
finally:
archive.close()
self.assertEqual(sorted(filenames), ['fake-1.0', 'fake-1.0/PKG-INFO', 'fake-1.0/README.manual'])
def test_suite():
return unittest.makeSuite(SDistTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\distutils\tests\test_sdist.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:56:18 Central Europe (Daylight Saving Time)
| [
"[email protected]"
] | |
4e1002d9ce5286e189a43928b766b6ff72a4dbff | 01926621374435f7daf622f1ef04a51f94e3e883 | /litex/build/quicklogic/platform.py | fbd200cb2efd6636f27feeb7075a6e6e6f0658c1 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | betrusted-io/litex | d717513e41ff6aba54ac172e886c21479aa41752 | 8109a8e91ca8321483ccc2f58bd4fed5379bbd18 | refs/heads/master | 2022-11-23T07:11:35.297128 | 2022-02-22T11:55:00 | 2022-02-22T11:55:00 | 231,203,917 | 3 | 0 | NOASSERTION | 2020-01-01T10:48:06 | 2020-01-01T10:48:05 | null | UTF-8 | Python | false | false | 1,184 | py | #
# This file is part of LiteX.
#
# Copyright (c) 2021 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
from litex.build.generic_platform import GenericPlatform
from litex.build.quicklogic import common, symbiflow
# QuickLogicPlatform -------------------------------------------------------------------------------
class QuickLogicPlatform(GenericPlatform):
bitstream_ext = ".bit"
def __init__(self, device, *args, toolchain="symbiflow", **kwargs):
GenericPlatform.__init__(self, device, *args, **kwargs)
if toolchain == "symbiflow":
self.toolchain = symbiflow.SymbiflowToolchain()
else:
raise ValueError(f"Unknown toolchain {toolchain}")
def get_verilog(self, *args, special_overrides=dict(), **kwargs):
so = dict(common.quicklogic_special_overrides)
so.update(special_overrides)
return GenericPlatform.get_verilog(self, *args,
special_overrides = so,
attr_translate = self.toolchain.attr_translate,
**kwargs)
def build(self, *args, **kwargs):
return self.toolchain.build(self, *args, **kwargs)
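# A minimal usage sketch; the device string, pin and I/O standard below are
# assumptions (real boards define their I/O lists in litex-boards):
#
# from litex.build.generic_platform import Pins, IOStandard
# _io = [("clk", 0, Pins("A1"), IOStandard("LVCMOS33"))]
# platform = QuickLogicPlatform("ql-eos-s3", _io, toolchain="symbiflow")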
| [
"[email protected]"
] | |
c480f46b0c551272158063ee08ae7ef47fb91801 | 6b5c67590979627a97b7d8f0d9fc131b63fa817d | /cgettext.py | 11081dd45c063fcc7fa697958c11031a104e4612 | [
"MIT"
] | permissive | eevee/cgettext | 303357e28349a6cdd906a3e5ffb2fc6889041f37 | 9efa06369c19c0631dbebbc2f45f787b4cd01eb5 | refs/heads/master | 2016-09-05T09:01:11.343350 | 2014-06-27T20:03:10 | 2014-06-27T20:03:10 | 19,359,054 | 1 | 0 | null | 2014-05-31T01:02:46 | 2014-05-01T21:56:14 | Python | UTF-8 | Python | false | false | 495 | py | try:
from _cgettext import c_parse
except ImportError:
# No C module available; just re-export the builtin
from gettext import GNUTranslations
else:
import gettext
class GNUTranslations(gettext.GNUTranslations):
def _parse(self, fp):
charset, metadata, catalog, plural = c_parse(fp)
self._charset = charset
self._info = metadata
self._catalog = catalog
self.plural = plural
__all__ = ['GNUTranslations']
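# A minimal usage sketch (hypothetical .mo path); the class mirrors the
# stdlib gettext API either way:
#
# with open('locale/de/LC_MESSAGES/app.mo', 'rb') as fp:
#     trans = GNUTranslations(fp)
# print(trans.gettext('hello'))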
| [
"[email protected]"
] | |
0467a469bfb2a1b833b93af0761a056efbc02d40 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/TRACE/FPythonCode/FTradeSheetColumnCustom.py | 7368f700f8f3f51a6c7c5f45f72afb9cdeed7e09 | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py |
import acm
def get_TradeReportTransType_string_from_value(val):
'''
    Accepts a value for TradeReportTransType from a FIX message and returns the mapped string to be displayed in the trade sheet for this column.
'''
switcher = {
"0": "New",
"1": "Cancel",
"2": "Replace",
"3": "Release",
"4": "Reverse"
}
ret = switcher.get(val, val)
return ret
def get_TradeReportType_string_from_value(val):
'''
    Accepts a value for TradeReportType from a FIX message and returns the mapped string to be displayed in the trade sheet for this column.
'''
switcher = {
"0": "Submit",
"1": "Alleged",
"2": "Accept",
"3": "Decline",
"4": "Addendum",
"5": "No/Was",
"6": "Trade Report Cancel",
"7": "Locked In Trade Break"
}
ret = switcher.get(val, val)
return ret
def get_PartyRole_string_from_value(val):
'''
    Accepts a value for PartyRole from a FIX message and returns the mapped string to be displayed in the trade sheet for this column.
'''
switcher = {
"1": "Executing Firm",
"7": "Entering Firm",
"14": "Giveup Firm",
"17": "Contra Firm",
"83": "Clearing Account"
}
ret = switcher.get(val, val)
return ret
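# A small usage sketch (hypothetical raw FIX tag values); unmapped values
# fall through unchanged via switcher.get(val, val):
#
# get_TradeReportTransType_string_from_value("1") # -> "Cancel"
# get_TradeReportType_string_from_value("6") # -> "Trade Report Cancel"
# get_PartyRole_string_from_value("99") # -> "99" (no mapping defined)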
| [
"[email protected]"
] | |
c12a2731c0266326e4342197497bdbe4b3103bbe | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/KoubeiCateringOrderPayDisburseModel.py | d7684ebecee96e879c01d6568cb06e1e665fd1cf | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 4,200 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.PosOrderKey import PosOrderKey
class KoubeiCateringOrderPayDisburseModel(object):
def __init__(self):
self._auth_code = None
self._member_flag = None
self._out_pay_no = None
self._pos_order_key = None
self._timeout = None
self._total_amount = None
self._undiscountable = None
@property
def auth_code(self):
return self._auth_code
@auth_code.setter
def auth_code(self, value):
self._auth_code = value
@property
def member_flag(self):
return self._member_flag
@member_flag.setter
def member_flag(self, value):
self._member_flag = value
@property
def out_pay_no(self):
return self._out_pay_no
@out_pay_no.setter
def out_pay_no(self, value):
self._out_pay_no = value
@property
def pos_order_key(self):
return self._pos_order_key
@pos_order_key.setter
def pos_order_key(self, value):
if isinstance(value, PosOrderKey):
self._pos_order_key = value
else:
self._pos_order_key = PosOrderKey.from_alipay_dict(value)
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, value):
self._total_amount = value
@property
def undiscountable(self):
return self._undiscountable
@undiscountable.setter
def undiscountable(self, value):
self._undiscountable = value
def to_alipay_dict(self):
params = dict()
if self.auth_code:
if hasattr(self.auth_code, 'to_alipay_dict'):
params['auth_code'] = self.auth_code.to_alipay_dict()
else:
params['auth_code'] = self.auth_code
if self.member_flag:
if hasattr(self.member_flag, 'to_alipay_dict'):
params['member_flag'] = self.member_flag.to_alipay_dict()
else:
params['member_flag'] = self.member_flag
if self.out_pay_no:
if hasattr(self.out_pay_no, 'to_alipay_dict'):
params['out_pay_no'] = self.out_pay_no.to_alipay_dict()
else:
params['out_pay_no'] = self.out_pay_no
if self.pos_order_key:
if hasattr(self.pos_order_key, 'to_alipay_dict'):
params['pos_order_key'] = self.pos_order_key.to_alipay_dict()
else:
params['pos_order_key'] = self.pos_order_key
if self.timeout:
if hasattr(self.timeout, 'to_alipay_dict'):
params['timeout'] = self.timeout.to_alipay_dict()
else:
params['timeout'] = self.timeout
if self.total_amount:
if hasattr(self.total_amount, 'to_alipay_dict'):
params['total_amount'] = self.total_amount.to_alipay_dict()
else:
params['total_amount'] = self.total_amount
if self.undiscountable:
if hasattr(self.undiscountable, 'to_alipay_dict'):
params['undiscountable'] = self.undiscountable.to_alipay_dict()
else:
params['undiscountable'] = self.undiscountable
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiCateringOrderPayDisburseModel()
if 'auth_code' in d:
o.auth_code = d['auth_code']
if 'member_flag' in d:
o.member_flag = d['member_flag']
if 'out_pay_no' in d:
o.out_pay_no = d['out_pay_no']
if 'pos_order_key' in d:
o.pos_order_key = d['pos_order_key']
if 'timeout' in d:
o.timeout = d['timeout']
if 'total_amount' in d:
o.total_amount = d['total_amount']
if 'undiscountable' in d:
o.undiscountable = d['undiscountable']
return o
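# A small round-trip sketch (hypothetical field values) using the helpers above:
if __name__ == '__main__':
    raw = {'out_pay_no': 'TEST0001', 'total_amount': '80.00', 'timeout': '30m'}
    model = KoubeiCateringOrderPayDisburseModel.from_alipay_dict(raw)
    print(model.to_alipay_dict())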
| [
"[email protected]"
] | |
8edff0421ebc56d61abee4a4cef9d6eef91672f0 | f6290b7b8ffb263b7f0d252a67e2c6320a4c1143 | /Binary Tree/height_of_special_binary_tree.py | 180231ea70bd5a270e62130aca6e3fd2873838a8 | [] | no_license | datAnir/GeekForGeeks-Problems | b45b0ae80053da8a1b47a2af06e688081574ef80 | c71f11d0349ed3850dfaa9c7a078ee70f67e46a1 | refs/heads/master | 2023-05-29T15:21:59.680793 | 2020-12-15T04:55:01 | 2020-12-15T04:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | '''
https://practice.geeksforgeeks.org/problems/height-of-spiral-tree/1
Given a special Binary Tree whose leaf nodes are connected to form a circular doubly linked list. Find the height of this special Binary Tree.
Example 1:
Input:
1
/ \
2 3
/ \
4 5
/
6
Output: 4
Explanation:
In the above binary tree, 6, 5 and 3 are leaf nodes and they form a circular doubly linked list. Here, the left pointer of leaf node will act as a previous pointer of circular doubly linked list and its right pointer will act as next pointer of circular doubly linked list.
'''
# method - 1 => BFS with a visited map (the leaf DLL links would otherwise cause revisits);
# note the DLL links can also let BFS reach deep leaves early, so method 2 below,
# which shadows this definition, is the one actually used
from collections import deque, defaultdict
def findTreeHeight(root):
q = deque([root])
level = 0
visited = defaultdict(bool)
visited[root] = True
while len(q) > 0:
size = len(q)
for i in range(size):
node = q.popleft()
if node.left and not visited[node.left]:
q.append(node.left)
visited[node.left] = True
if node.right and not visited[node.right]:
q.append(node.right)
visited[node.right] = True
level += 1
return level
# method - 2 => use the circular DLL itself: a node is a leaf exactly when a
# DLL neighbour points back at it (node == node.left.right or node == node.right.left)
def findTreeHeight(root):
if root == None:
return 0
if (root.left != None and root == root.left.right) or (root.right != None and root == root.right.left):
return 1
lh = findTreeHeight(root.left)
rh = findTreeHeight(root.right)
    return max(lh, rh) + 1
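# A self-contained sketch of the example above (GFG's hidden driver normally
# builds the tree): leaves 6, 5 and 3 wired as a circular DLL via left/right.
if __name__ == '__main__':
    class Node:
        def __init__(self, data):
            self.data, self.left, self.right = data, None, None
    n1, n2, n3, n4, n5, n6 = (Node(i) for i in range(1, 7))
    n1.left, n1.right = n2, n3
    n2.left, n2.right = n4, n5
    n4.left = n6
    # circular DLL over the leaves: ... <-> 6 <-> 5 <-> 3 <-> ...
    n6.left, n6.right = n3, n5
    n5.left, n5.right = n6, n3
    n3.left, n3.right = n5, n6
    print(findTreeHeight(n1))  # expected: 4
| [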
"[email protected]"
] | |
2056c13bcd2d7119d86fef5e4c54c693d4a2d140 | 6f10c043a65d7c0f39cb75b3d39370261e2371c3 | /papers/tests.py | 8b4f86b9d012cc17e78d75343b0d307434c9771f | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | BridgesLab/Lab-Website | 8d71def7874d7970c42fe2e697f1c163ae5e7eb9 | d6f6c9c068bbf668c253e5943d9514947023e66d | refs/heads/master | 2020-12-25T08:37:36.970077 | 2020-05-26T01:25:00 | 2020-05-26T01:25:00 | 12,854,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,411 | py | """
This package contains the unit tests for the :mod:`papers` app.
It contains view and model tests for each model, grouped together.
Contains the two model tests:
* :class:`~papers.tests.PublicationModelTests`
* :class:`~papers.tests.AuthorDetailsModelTests`
The API tests:
* :class:`~PublicationResourceTests`
And the view tests:
* :class:`~papers.tests.PublicationViewTests`
"""
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import User
from papers.models import Publication, AuthorDetails, Person, Commentary
MODELS = [Publication, AuthorDetails, Commentary]
class PublicationModelTests(TestCase):
'''This class tests various aspects of the :class:`~papers.models.Publication` model.'''
fixtures = ['test_publication.json', 'test_publication_personnel.json']
def setUp(self):
'''Instantiate the test client. Creates a test user.'''
self.client = Client()
self.test_user = User.objects.create_user('testuser', '[email protected]', 'testpassword')
self.test_user.is_superuser = True
self.test_user.is_active = True
self.test_user.save()
self.assertEqual(self.test_user.is_superuser, True)
login = self.client.login(username='testuser', password='testpassword')
self.failUnless(login, 'Could not log in')
def tearDown(self):
'''Depopulate created model instances from test database.'''
for model in MODELS:
for obj in model.objects.all():
obj.delete()
def test_create_new_paper_minimum(self):
'''This test creates a :class:`~papers.models.Publication` with the required information only.'''
test_publication = Publication(title='Test Publication.', laboratory_paper=True, interesting_paper=False, preprint=False)
test_publication.save()
self.assertEqual(test_publication.pk, 3)
#def test_create_new_paper_all(self):
#'''This test creates a `::class:Publication` with the required information only.'''
#test_publication = Publication(title='Test Publication') #add more fields
#test_publication.save()
def test_paper_unicode(self):
'''This tests the unicode representation of a :class:`~papers.models.Publication`.'''
test_publication = Publication.objects.get(title_slug='14-3-3-proteins-a-number-of-functions-for-a-numbered-protein', laboratory_paper=True, interesting_paper=False, preprint=False)
self.assertEqual(test_publication.__unicode__(), "14-3-3 proteins: a number of functions for a numbered protein.")
def test_paper_title_slug(self):
'''This tests the title_slug field of a :class:`~papers.models.Publication`.'''
test_publication = Publication(title='Test Publication.', laboratory_paper=True, interesting_paper=False, preprint=False)
test_publication.save()
self.assertEqual(test_publication.title_slug, "test-publication")
def test_paper_absolute_url(self):
'''This tests the title_slug field of a :class:`~papers.models.Publication`.'''
test_publication = Publication(title='Test Publication', laboratory_paper=True, interesting_paper=False, preprint=False)
test_publication.save()
self.assertEqual(test_publication.get_absolute_url(), "/papers/test-publication")
def test_paper_doi_link(self):
'''This tests the title_slug field of a :class:`~papers.models.Publication`.'''
test_publication = Publication.objects.get(title="14-3-3 proteins: a number of functions for a numbered protein.", laboratory_paper=True, interesting_paper=False, preprint=False)
self.assertEqual(test_publication.doi_link(), "http://dx.doi.org/10.1126/stke.2962005re10")
def test_full_pmcid(self):
'''This tests that a correct full PMCID can be generated for a :class:`~papers.models.Publication`.'''
test_publication = Publication(title="Test Publication", pmcid = "12345", laboratory_paper=True, interesting_paper=False, preprint=False)
test_publication.save()
self.assertEqual(test_publication.full_pmcid(), 'PMC12345')
class AuthorDetailsModelTests(TestCase):
    '''This class tests various aspects of the :class:`~papers.models.AuthorDetails` model.'''
fixtures = ['test_publication', 'test_publication_personnel']
def setUp(self):
'''Instantiate the test client. Creates a test user.'''
self.client = Client()
self.test_user = User.objects.create_user('testuser', '[email protected]', 'testpassword')
self.test_user.is_superuser = True
self.test_user.is_active = True
self.test_user.save()
self.assertEqual(self.test_user.is_superuser, True)
login = self.client.login(username='testuser', password='testpassword')
self.failUnless(login, 'Could not log in')
def tearDown(self):
'''Depopulate created model instances from test database.'''
for model in MODELS:
for obj in model.objects.all():
obj.delete()
def test_create_new_authordetail_minimum(self):
'''This test creates a :class:`~papers.models.AuthorDetails` with the required information only.'''
test_authordetail = AuthorDetails(author=Person.objects.get(pk=1),
order = 1, corresponding_author=True, equal_contributors=False)
test_authordetail.save()
def test_create_new_authordetail_all(self):
'''This test creates a :class:`~papers.models.AuthorDetails` with the required information only.'''
test_authordetail = AuthorDetails(author=Person.objects.get(pk=1),
order = 1,
corresponding_author = True,
equal_contributors = True)
test_authordetail.save()
def test_authordetail_unicode(self):
        '''This tests that the unicode representation of an :class:`~papers.models.AuthorDetails` object is correct.'''
test_authordetail = AuthorDetails(author=Person.objects.get(pk=1),
order = 1, corresponding_author=True, equal_contributors=False)
test_authordetail.save()
self.assertEqual(test_authordetail.__unicode__(), '1 - None - Dave Bridges')
class CommentaryModelTests(TestCase):
'''This class tests various aspects of the :class:`~papers.models.Commentary` model.'''
fixtures = ['test_publication', 'test_personnel','test_publication_personnel.json']
def setUp(self):
'''Instantiate the test client. Creates a test user.'''
self.client = Client()
self.test_user = User.objects.create_user('testuser', '[email protected]', 'testpassword')
self.test_user.is_superuser = True
self.test_user.is_active = True
self.test_user.save()
self.assertEqual(self.test_user.is_superuser, True)
login = self.client.login(username='testuser', password='testpassword')
self.failUnless(login, 'Could not log in')
def tearDown(self):
'''Depopulate created model instances from test database.'''
for model in MODELS:
for obj in model.objects.all():
obj.delete()
def test_create_new_commentary_minimum(self):
'''This test creates a :class:`~papers.models.Commentary` with the required information only.'''
test_commentary = Commentary(paper=Publication.objects.get(pk=1),
comments = "Some comments")
test_commentary.save()
self.assertEqual(test_commentary.pk, 1)
def test_create_new_commentary_all(self):
'''This test creates a :class:`~papers.models.Commentary` with all fields entered.'''
test_commentary = Commentary(paper=Publication.objects.get(pk=1),
comments = "Some comments",
author = Person.objects.get(pk=1),
citation = "some citation")
test_commentary.save()
self.assertEqual(test_commentary.pk, 1)
def test_commentary_unicode(self):
'''This test creates a :class:`~papers.models.Commentary` and then verifies the unicode representation is correct.'''
test_commentary = Commentary(paper=Publication.objects.get(pk=1),
comments = "Some comments")
test_commentary.save()
self.assertEqual(test_commentary.__unicode__(), "Journal club summary on 14-3-3 proteins: a number of functions for a numbered protein.")
class PublicationResourceTests(TestCase):
    '''This class tests various aspects of the :class:`~papers.api.PublicationResource` API model.'''
fixtures = ['test_publication', 'test_publication_personnel']
def setUp(self):
'''Instantiate the test client. Creates a test user.'''
self.client = Client()
self.test_user = User.objects.create_user('testuser', '[email protected]', 'testpassword')
self.test_user.is_superuser = True
self.test_user.is_active = True
self.test_user.save()
self.assertEqual(self.test_user.is_superuser, True)
login = self.client.login(username='testuser', password='testpassword')
self.failUnless(login, 'Could not log in')
def tearDown(self):
'''Depopulate created model instances from test database.'''
for model in MODELS:
for obj in model.objects.all():
obj.delete()
def api_publication_list_test(self):
'''This tests that the API correctly renders a list of :class:`~papers.models.Publication` objects.'''
response = self.client.get('/api/v1/publications/?format=json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json; charset=utf-8')
def api_publication_detail_test(self):
'''This tests that the API correctly renders a particular :class:`~papers.models.Publication` objects.'''
response = self.client.get('/api/v1/publications/1/?format=json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json; charset=utf-8')
print response
class PublicationViewTests(TestCase):
'''This class tests the views for :class:`~papers.models.Publication` objects.'''
fixtures = ['test_publication', 'test_publication_personnel']
def setUp(self):
"""Instantiate the test client. Creates a test user."""
self.client = Client()
self.test_user = User.objects.create_user('testuser', '[email protected]', 'testpassword')
self.test_user.is_superuser = True
self.test_user.is_active = True
self.test_user.save()
self.assertEqual(self.test_user.is_superuser, True)
login = self.client.login(username='testuser', password='testpassword')
self.failUnless(login, 'Could not log in')
def tearDown(self):
"""Depopulate created model instances from test database."""
for model in MODELS:
for obj in model.objects.all():
obj.delete()
def test_publication_view(self):
"""This tests the paper-details view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/14-3-3-proteins-a-number-of-functions-for-a-numbered-protein/')
self.assertEqual(test_response.status_code, 200)
self.assertTrue('publication' in test_response.context)
self.assertTemplateUsed(test_response, 'paper-detail.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'disqus_snippet.html')
self.assertTemplateUsed(test_response, 'paper_sharing_widgets.html')
self.assertTemplateUsed(test_response, 'altmetric_snippet.html')
self.assertEqual(test_response.context['publication'].pk, 1)
self.assertEqual(test_response.context['publication'].title, u'14-3-3 proteins: a number of functions for a numbered protein.')
def test_lab_papers_list(self):
"""This tests the laboratory-papers view ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/')
self.assertEqual(test_response.status_code, 200)
self.assertTrue('publication_list' in test_response.context)
self.assertTemplateUsed(test_response, 'paper-list.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'facebook_api_sdk_script.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
self.assertTemplateUsed(test_response, 'paper-detail-snippet.html')
self.assertEqual(test_response.context['publication_list'][0].pk, 1)
self.assertEqual(test_response.context['publication_list'][0].title, u'14-3-3 proteins: a number of functions for a numbered protein.')
def test_interesting_papers_list(self):
"""This tests the interesting-papers view ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/interesting')
self.assertEqual(test_response.status_code, 200)
self.assertTrue('publication_list' in test_response.context)
self.assertTemplateUsed(test_response, 'paper-list.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'paper-detail-snippet.html')
self.assertEqual(test_response.context['publication_list'][0].pk, 2)
self.assertEqual(test_response.context['publication_list'][0].title, u"THE RELATION OF ADENOSINE-3', 5'-PHOSPHATE AND PHOSPHORYLASE TO THE ACTIONS OF CATECHOLAMINES AND OTHER HORMONES.")
def test_publication_view_create(self):
"""This tests the paper-new view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/new/')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'publication_form.html')
def test_publication_view_edit(self):
"""This tests the paper-edit view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/14-3-3-proteins-a-number-of-functions-for-a-numbered-protein/edit/')
self.assertEqual(test_response.status_code, 200)
self.assertTrue('publication' in test_response.context)
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'publication_form.html')
self.assertEqual(test_response.context['publication'].pk, 1)
self.assertEqual(test_response.context['publication'].title, u'14-3-3 proteins: a number of functions for a numbered protein.')
#verifies that a non-existent object returns a 404 error presuming there is no object with pk=2.
null_response = self.client.get('/papers/not-a-real-paper/edit/')
self.assertEqual(null_response.status_code, 404)
def test_publication_view_delete(self):
"""This tests the paper-delete view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/14-3-3-proteins-a-number-of-functions-for-a-numbered-protein/delete/')
self.assertEqual(test_response.status_code, 200)
self.assertTrue('publication' in test_response.context)
self.assertTemplateUsed(test_response, 'confirm_delete.html')
self.assertEqual(test_response.context['publication'].pk, 1)
self.assertEqual(test_response.context['publication'].title, u'14-3-3 proteins: a number of functions for a numbered protein.')
#verifies that a non-existent object returns a 404 error.
null_response = self.client.get('/papers/not-a-real-paper/delete/')
self.assertEqual(null_response.status_code, 404)
class CommentaryViewTests(TestCase):
'''This class tests the views for :class:`~papers.models.Commentary` objects.'''
fixtures = ['test_publication', 'test_personnel', 'test_commentary','test_publication_personnel.json']
def setUp(self):
"""Instantiate the test client. Creates a test user."""
self.client = Client()
self.test_user = User.objects.create_user('testuser', '[email protected]', 'testpassword')
self.test_user.is_superuser = True
self.test_user.is_active = True
self.test_user.save()
self.assertEqual(self.test_user.is_superuser, True)
login = self.client.login(username='testuser', password='testpassword')
        self.assertTrue(login, 'Could not log in')
def tearDown(self):
"""Depopulate created model instances from test database."""
for model in MODELS:
for obj in model.objects.all():
obj.delete()
def test_commentary_view(self):
"""This tests the commentary-detail view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/commentary/1')
self.assertEqual(test_response.status_code, 200)
self.assertTrue('commentary' in test_response.context)
self.assertTemplateUsed(test_response, 'commentary-detail.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'disqus_snippet.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
self.assertEqual(test_response.context['commentary'].pk, 1)
self.assertEqual(test_response.context['commentary'].paper.__unicode__(), u'14-3-3 proteins: a number of functions for a numbered protein.')
self.assertEqual(test_response.context['commentary'].comments, "some comments for this fixture")
#verifies that a non-existent object returns a 404 error.
null_response = self.client.get('/papers/commentary/9999')
self.assertEqual(null_response.status_code, 404)
def test_commentary_view_create(self):
"""This tests the commentary-new view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/commentary/new')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'commentary-form.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
def test_commentary_view_edit(self):
"""This tests the commentary-edit view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/commentary/1/edit')
self.assertEqual(test_response.status_code, 200)
self.assertTrue('commentary' in test_response.context)
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'commentary-form.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
self.assertEqual(test_response.context['commentary'].pk, 1)
self.assertEqual(test_response.context['commentary'].paper.__unicode__(), u'14-3-3 proteins: a number of functions for a numbered protein.')
self.assertEqual(test_response.context['commentary'].comments, "some comments for this fixture")
#verifies that a non-existent object returns a 404 error.
null_response = self.client.get('/papers/commentary/9999/edit')
self.assertEqual(null_response.status_code, 404)
def test_commentary_view_delete(self):
"""This tests the commentary-delete view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/commentary/1/delete')
self.assertEqual(test_response.status_code, 200)
self.assertTrue('object' in test_response.context)
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'confirm_delete.html')
def test_commentary_view_list(self):
"""This tests the commentary-list view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/papers/commentaries')
self.assertEqual(test_response.status_code, 200)
self.assertTrue('commentary_list' in test_response.context)
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'commentary-list.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
self.assertEqual(test_response.context['commentary_list'][0].pk, 1)
self.assertEqual(test_response.context['commentary_list'][0].paper.__unicode__(), u'14-3-3 proteins: a number of functions for a numbered protein.')
self.assertEqual(test_response.context['commentary_list'][0].comments, "some comments for this fixture")
def test_jc_view_list(self):
"""This tests the jc-list view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/journal-club')
self.assertEqual(test_response.status_code, 200)
self.assertTrue('journal_club_list' in test_response.context)
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'jc-list.html')
        self.assertTemplateUsed(test_response, 'analytics_tracking.html')


# ===========================================================================
# /part_one/6-builder_pattern/builder/director.py
# (repo: diegogcc/py-design_patterns, Python, no license)
# ===========================================================================
class Director:
def __init__(self, builder):
self._builder = builder
def get_computer(self):
return self._builder.get_computer()
def build_computer(self):
self._builder.new_computer()
self._builder.get_case()
self._builder.build_mainboard()
self._builder.install_mainboard()
self._builder.install_hard_drive()
        self._builder.install_video_card()
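

# A minimal usage sketch (assumption for illustration: a concrete builder
# such as DesktopBuilder implementing new_computer/get_case/build_mainboard/
# ... and get_computer, which is the interface the Director above drives):
#
#     builder = DesktopBuilder()
#     director = Director(builder)
#     director.build_computer()
#     computer = director.get_computer()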
"[email protected]"
] | |
a1bab5f325d133df17fbae75ee780f703da474c6 | 482467f7875513440ccc9fb5ee5755214137e8df | /homeassistant/components/stiebel_eltron/__init__.py | 52dc2d848918bf88b821b56a49c0cb0a36338a48 | [
"Apache-2.0"
] | permissive | Watemlifts/home-assistant | fbf16d91489f9ab472b1fda928fc472f99d2b057 | 6e414983738d9495eb9e4f858e3e98e9e38869db | refs/heads/dev | 2023-07-21T06:38:40.212969 | 2023-07-15T09:33:07 | 2023-07-15T09:33:07 | 195,134,511 | 4 | 0 | Apache-2.0 | 2023-07-15T09:33:08 | 2019-07-03T22:34:49 | Python | UTF-8 | Python | false | false | 1,761 | py | """The component for STIEBEL ELTRON heat pumps with ISGWeb Modbus module."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.modbus import (
CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN)
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
DOMAIN = 'stiebel_eltron'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): cv.string,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
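
# An illustrative configuration.yaml entry matching CONFIG_SCHEMA above
# (the values are assumptions for the sketch, not taken from the source):
#
#     stiebel_eltron:
#       name: heat_pump
#       hub: modbus_hub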
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass, config):
"""Set up the STIEBEL ELTRON unit.
Will automatically load climate platform.
"""
name = config[DOMAIN][CONF_NAME]
modbus_client = hass.data[MODBUS_DOMAIN][config[DOMAIN][CONF_HUB]]
hass.data[DOMAIN] = {
'name': name,
'ste_data': StiebelEltronData(name, modbus_client)
}
discovery.load_platform(hass, 'climate', DOMAIN, {}, config)
return True
class StiebelEltronData:
"""Get the latest data and update the states."""
def __init__(self, name, modbus_client):
"""Init the STIEBEL ELTRON data object."""
from pystiebeleltron import pystiebeleltron
self.api = pystiebeleltron.StiebelEltronAPI(modbus_client, 1)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update unit data."""
if not self.api.update():
_LOGGER.warning("Modbus read failed")
else:
_LOGGER.debug("Data updated successfully")


# ===========================================================================
# /log-20190927/132.230.102.123-10.21.11.11/1569575419.py
# (repo: LennartElbe/codeEvo, Python, no license)
# ===========================================================================
import functools
import typing
import string
import random
import pytest
def leap(year: int) -> bool:
"""
Args:
year: an integer
Returns:
a boolean expression
"""
if year < 1583:
return False
    if year % 4 == 0:
        if year % 100 == 0 and year % 400 != 0:
            return False
else:
return True
else:
return False
print(leap(1582))
print(leap(1644))
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(4))
count = 0
def coverage(func):
nonlocal covered, target, count
def wrapper(year):
nonlocal covered, count
if year % 4 != 0:
covered.add(0)
elif year % 100 != 0:
covered.add(1)
elif year % 400 != 0:
covered.add(2)
else:
covered.add(3)
r = func (year)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
functools.update_wrapper(wrapper, func)
return wrapper
return coverage
coverage = mk_coverage ()
try:
leap = coverage(leap)
except:
pass
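
# Note: the coverage() wrapper above records which of the four leap-year
# branches (non-multiple of 4 / century rule / 400 rule / plain leap year)
# have been exercised; TestGrades.test_coverage below compares the number of
# achieved branches against the four required ones.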
## Solution part 2 (tests)
def test_leap():
assert leap(1582) == False
assert leap(1583) == False
assert leap(1600) == True
assert leap(1644) == False
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_leap (self):
assert leap
assert 'year' in getfullargspec(leap).args
class TestGrades:
def test_docstring_present(self):
assert leap.__doc__ is not None
def test_typing_present(self):
        assert typing.get_type_hints(leap) == typing.get_type_hints(self.leap_oracle)
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def leap_oracle(self, year :int) -> bool:
if year % 4 != 0:
return False
elif year % 100 != 0:
return True
elif year % 400 == 0:
return True
else:
return False
def check_leap (self, year):
assert leap (year) == self.leap_oracle (year)
def test_correctness(self):
for i in range (100):
year = random.randrange (1582,2500)
self.check_leap (year)
for i in range (100):
year = random.randrange (1600,3000, 100)
self.check_leap (year)


# ===========================================================================
# /DailyProgrammer/DP20140226B.py
# (repo: DayGitH/Python-Challenges, Python, MIT)
# ===========================================================================
"""
[02/26/14] Challenge #150 [Intermediate] Re-emvoweler 1
https://www.reddit.com/r/dailyprogrammer/comments/1yzlde/022614_challenge_150_intermediate_reemvoweler_1/
# _(Intermediate)_: Re-emvoweler 1
In [this week's Easy
challenge](http://www.reddit.com/r/dailyprogrammer/comments/1ystvb/022414_challenge_149_easy_disemvoweler/), series of
words were disemvoweled into vowels, and non-vowel letters. Spaces were also removed. Your task today is, given the two
strings produced via disemvowelment, output _one possibility_ for the original string.
1. Your output must be such that if you put it through the solution to this week's Easy challenge, you'll recover
exactly the input you were given.
2. You don't need to output the same string as the one that was originally disemvoweled, just _some_ string that
disemvowels to your input.
3. Use [the Enable word list](http://code.google.com/p/dotnetperls-controls/downloads/detail?name=enable1.txt), or some
other reasonable English word list. Every word in your output must appear in your word list.
4. For the sample inputs, all words in originally disemvoweled strings appear in Enable. In particular, I'm not using
any words with punctuation, and I'm not using the word "a".
5. As before, ignore punctuation and capitalization.
# Formal Inputs & Outputs
## Input description
Two strings, one containing only non-vowel letters, and one containing only vowels.
## Output description
A space-separated series of words that could be disemvoweled into the input, each word of which must appear in your
word list.
# Sample Inputs & Outputs
## Sample Input 1
wwllfndffthstrds
eieoeaeoi
## Sample Output 1
There are, in general, many correct outputs. Any of these is valid output for the sample input (using the Enable word
list to verify words):
we wile lo fen daff et host rids
we wile lo fend aff eths tor ids
we wile lo fen daff the sot rids
we will fend off eths tare do si
we will fend off the asteroids
## Sample Input 2
bbsrshpdlkftbllsndhvmrbndblbnsthndlts
aieaeaeieooaaaeoeeaeoeaau
## Sample Outputs 2
ab bise ars he ae pi ed look fa tab all sned hove me ar bend blob ens than adults
ai be base rash pe die look fat bal la sned hove me ar bend blob ens than adults
babies ae rash pe die loo ka fat balls end ho vee mar bend blob ens than adults
babies rash pedal kef tie bolls nod aah ave omer bendable bones than adults
babies are shaped like footballs and have more bendable bones than adults
## Sample Input 3
llfyrbsshvtsmpntbncnfrmdbyncdt
aoouiaeaeaoeoieeoieaeoe
# Notes
Thanks to /u/abecedarius for inspiring this challenge on /r/dailyprogrammer_ideas!
Think you can do a better job of re-emvoweling? Check out this week's Hard challenge!
"""
def main():
pass
if __name__ == "__main__":
main()


# ===========================================================================
# /xero_python/accounting/models/country_code.py
# (repo: liseekeralbert/xero-python, Python, MIT)
# ===========================================================================
# coding: utf-8
"""
Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.3.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from enum import Enum
class CountryCode(Enum):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
allowed enum values
"""
AD = "AD"
AE = "AE"
AF = "AF"
AG = "AG"
AI = "AI"
AL = "AL"
AM = "AM"
AN = "AN"
AO = "AO"
AQ = "AQ"
AR = "AR"
AS = "AS"
AT = "AT"
AU = "AU"
AW = "AW"
AZ = "AZ"
BA = "BA"
BB = "BB"
BD = "BD"
BE = "BE"
BF = "BF"
BG = "BG"
BH = "BH"
BI = "BI"
BJ = "BJ"
BL = "BL"
BM = "BM"
BN = "BN"
BO = "BO"
BR = "BR"
BS = "BS"
BT = "BT"
BW = "BW"
BY = "BY"
BZ = "BZ"
CA = "CA"
CC = "CC"
CD = "CD"
CF = "CF"
CG = "CG"
CH = "CH"
CI = "CI"
CK = "CK"
CL = "CL"
CM = "CM"
CN = "CN"
CO = "CO"
CR = "CR"
CU = "CU"
CV = "CV"
CW = "CW"
CX = "CX"
CY = "CY"
CZ = "CZ"
DE = "DE"
DJ = "DJ"
DK = "DK"
DM = "DM"
DO = "DO"
DZ = "DZ"
EC = "EC"
EE = "EE"
EG = "EG"
EH = "EH"
ER = "ER"
ES = "ES"
ET = "ET"
FI = "FI"
FJ = "FJ"
FK = "FK"
FM = "FM"
FO = "FO"
FR = "FR"
GA = "GA"
GB = "GB"
GD = "GD"
GE = "GE"
GG = "GG"
GH = "GH"
GI = "GI"
GL = "GL"
GM = "GM"
GN = "GN"
GQ = "GQ"
GR = "GR"
GT = "GT"
GU = "GU"
GW = "GW"
GY = "GY"
HK = "HK"
HN = "HN"
HR = "HR"
HT = "HT"
HU = "HU"
ID = "ID"
IE = "IE"
IL = "IL"
IM = "IM"
IN = "IN"
IO = "IO"
IQ = "IQ"
IR = "IR"
IS = "IS"
IT = "IT"
JE = "JE"
JM = "JM"
JO = "JO"
JP = "JP"
KE = "KE"
KG = "KG"
KH = "KH"
KI = "KI"
KM = "KM"
KN = "KN"
KP = "KP"
KR = "KR"
KW = "KW"
KY = "KY"
KZ = "KZ"
LA = "LA"
LB = "LB"
LC = "LC"
LI = "LI"
LK = "LK"
LR = "LR"
LS = "LS"
LT = "LT"
LU = "LU"
LV = "LV"
LY = "LY"
MA = "MA"
MC = "MC"
MD = "MD"
ME = "ME"
MF = "MF"
MG = "MG"
MH = "MH"
MK = "MK"
ML = "ML"
MM = "MM"
MN = "MN"
MO = "MO"
MP = "MP"
MR = "MR"
MS = "MS"
MT = "MT"
MU = "MU"
MV = "MV"
MW = "MW"
MX = "MX"
MY = "MY"
MZ = "MZ"
NA = "NA"
NC = "NC"
NE = "NE"
NG = "NG"
NI = "NI"
NL = "NL"
NO = "NO"
NP = "NP"
NR = "NR"
NU = "NU"
NZ = "NZ"
OM = "OM"
PA = "PA"
PE = "PE"
PF = "PF"
PG = "PG"
PH = "PH"
PK = "PK"
PL = "PL"
PM = "PM"
PN = "PN"
PR = "PR"
PS = "PS"
PT = "PT"
PW = "PW"
PY = "PY"
QA = "QA"
RE = "RE"
RO = "RO"
RS = "RS"
RU = "RU"
RW = "RW"
SA = "SA"
SB = "SB"
SC = "SC"
SD = "SD"
SE = "SE"
SG = "SG"
SH = "SH"
SI = "SI"
SJ = "SJ"
SK = "SK"
SL = "SL"
SM = "SM"
SN = "SN"
SO = "SO"
SR = "SR"
SS = "SS"
ST = "ST"
SV = "SV"
SX = "SX"
SY = "SY"
SZ = "SZ"
TC = "TC"
TD = "TD"
TG = "TG"
TH = "TH"
TJ = "TJ"
TK = "TK"
TL = "TL"
TM = "TM"
TN = "TN"
TO = "TO"
TR = "TR"
TT = "TT"
TV = "TV"
TW = "TW"
TZ = "TZ"
UA = "UA"
UG = "UG"
US = "US"
UY = "UY"
UZ = "UZ"
VA = "VA"
VC = "VC"
VE = "VE"
VG = "VG"
VI = "VI"
VN = "VN"
VU = "VU"
WF = "WF"
WS = "WS"
XK = "XK"
YE = "YE"
YT = "YT"
ZA = "ZA"
ZM = "ZM"
ZW = "ZW"


# ===========================================================================
# /samples/openapi3/client/3_0_3_unit_test/python/unit_test_api/paths/
#     response_body_post_oneof_response_body_for_content_types/post.pyi
# (repo: mishin/openapi-generator, Python stub, Apache-2.0)
# ===========================================================================
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
from unit_test_api.model.oneof import Oneof
SchemaFor200ResponseBodyApplicationJson = Oneof
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _post_oneof_response_body_for_content_types_oapg(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _post_oneof_response_body_for_content_types_oapg(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _post_oneof_response_body_for_content_types_oapg(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _post_oneof_response_body_for_content_types_oapg(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class PostOneofResponseBodyForContentTypes(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def post_oneof_response_body_for_content_types(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def post_oneof_response_body_for_content_types(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def post_oneof_response_body_for_content_types(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def post_oneof_response_body_for_content_types(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._post_oneof_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def post(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def post(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def post(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def post(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._post_oneof_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)


# ===========================================================================
# /stockmarket/migrations/0018_auto_20210310_0713.py
# (repo: webclinic017/investtrack, Python, MIT)
# ===========================================================================
# Generated by Django 3.0.7 on 2021-03-09 23:13
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('stockmarket', '0017_auto_20210307_1803'),
]
operations = [
migrations.AddField(
model_name='companydailybasic',
name='pb',
field=models.FloatField(blank=True, null=True, verbose_name='市净率'),
),
migrations.AddField(
model_name='companydailybasic',
name='ps',
field=models.FloatField(blank=True, null=True, verbose_name='市销率'),
),
migrations.AddField(
model_name='companydailybasic',
name='ps_ttm',
field=models.FloatField(blank=True, null=True, verbose_name='市销率TTM'),
),
migrations.CreateModel(
name='IndexDailyBasic',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间')),
('last_mod_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='最后更新时间')),
('ts_code', models.CharField(blank=True, max_length=50, unique=True, verbose_name='TS代码')),
('trade_date', models.DateField(blank=True, null=True, verbose_name='交易日期')),
('turnover_rate', models.FloatField(blank=True, max_length=50, null=True, verbose_name='换手率')),
('turnover_rate_f', models.FloatField(blank=True, max_length=50, null=True, verbose_name='换手率(自由流通)')),
('pe', models.FloatField(blank=True, null=True, verbose_name='市盈率')),
('pe_ttm', models.FloatField(blank=True, null=True, verbose_name='市盈率TTM')),
('pb', models.FloatField(blank=True, null=True, verbose_name='市净率')),
('total_share', models.FloatField(blank=True, null=True, verbose_name='总股本')),
('float_share', models.FloatField(blank=True, null=True, verbose_name='流通股本')),
('free_share', models.FloatField(blank=True, null=True, verbose_name='自由流通股本')),
('total_mv', models.FloatField(blank=True, null=True, verbose_name='总市值')),
('float_mv', models.FloatField(blank=True, null=True, verbose_name='流通市值')),
('company', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='stockmarket.StockNameCodeMap')),
],
options={
'verbose_name': '指数每日基本',
'verbose_name_plural': '指数每日基本',
'ordering': ['-last_mod_time'],
'get_latest_by': 'id',
},
),
]


# ===========================================================================
# /team_scripts/nakama_exp076_plus_fp_addition_95.py
# (repo: yuv4r4j/kaggle-rfcx, Python, MIT)
# ===========================================================================
import os
import random
import time
import warnings
import audiomentations as A
import kornia.augmentation as K
import librosa
import numpy as np
import pandas as pd
import soundfile as sf
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from pathlib import Path
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import Dataset
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingLR, CosineAnnealingWarmRestarts
from torchlibrosa.augmentation import SpecAugmentation
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from tqdm import tqdm
from transformers import get_linear_schedule_with_warmup
warnings.filterwarnings("ignore")
################################################
# Directory Setting #
################################################
OUTPUT_DIR = Path(f"../out/{__file__.split('/')[-1].replace('.py', '')}")
OUTPUT_DIR.mkdir(exist_ok=True, parents=True)
################################################
# Config #
################################################
class CFG:
debug = False
apex = False
num_workers = 20
model_name = "tf_efficientnet_b0_ns"
model_param = {
"encoder": model_name,
"classes_num": 24
}
duration = 10
period = 6
step_scheduler = True
epochs = 60
T_max = 10
T_0 = 10
lr = 1e-3
min_lr = 0.0
batch_size = 64
weight_decay = 1e-6
gradient_accumulation_steps = 1
max_grad_norm = 1000
alpha = 1.0
mixup_epochs = 0
p_mixup = 0.0
p_cutmux = 0.0
seed = 777
target_size = 24
target_col = "target"
n_fold = 5
trn_fold = [0, 1, 2, 3, 4]
train = True
inference = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
################################################
# Utilities #
################################################
def _one_sample_positive_class_precisions(scores: np.ndarray, truth: np.ndarray):
num_classes = scores.shape[0]
pos_class_indices = np.flatnonzero(truth > 0)
if not len(pos_class_indices):
return pos_class_indices, np.zeros(0)
retrieved_classes = np.argsort(scores)[::-1]
    # plain int/bool/float: the np.int/np.bool/np.float scalar aliases were
    # removed in NumPy 1.24, and the builtins behave identically here
    class_rankings = np.zeros(num_classes, dtype=int)
    class_rankings[retrieved_classes] = range(num_classes)
    retrieved_class_true = np.zeros(num_classes, dtype=bool)
    retrieved_class_true[class_rankings[pos_class_indices]] = True
    retrieved_cumulative_hits = np.cumsum(retrieved_class_true)
    precision_at_hits = (
        retrieved_cumulative_hits[class_rankings[pos_class_indices]] /
        (1 + class_rankings[pos_class_indices].astype(float)))
return pos_class_indices, precision_at_hits
def lwlrap(truth: np.ndarray, scores: np.ndarray):
assert truth.shape == scores.shape
num_samples, num_classes = scores.shape
precisions_for_samples_by_classes = np.zeros((num_samples, num_classes))
for sample_num in range(num_samples):
pos_class_indices, precision_at_hits = _one_sample_positive_class_precisions(scores[sample_num, :], truth[sample_num, :])
precisions_for_samples_by_classes[sample_num, pos_class_indices] = precision_at_hits
labels_per_class = np.sum(truth > 0, axis=0)
weight_per_class = labels_per_class / float(np.sum(labels_per_class))
per_class_lwlrap = (np.sum(precisions_for_samples_by_classes, axis=0) /
np.maximum(1, labels_per_class))
return per_class_lwlrap, weight_per_class
def get_score(y_true: np.ndarray, y_pred: np.ndarray):
"""
y_true = np.array([[1, 0, 0], [0, 0, 1]])
y_pred = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
"""
score_class, weight = lwlrap(y_true, y_pred)
score = (score_class * weight).sum()
return score
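
# Quick sanity check using the example from the docstring above:
#     y_true = np.array([[1, 0, 0], [0, 0, 1]])
#     y_pred = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
#     get_score(y_true, y_pred)  # a label-weighted LWLRAP value in [0, 1]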
def init_logger(log_file=OUTPUT_DIR / 'train.log'):
from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
logger = getLogger(__name__)
logger.setLevel(INFO)
handler1 = StreamHandler()
handler1.setFormatter(Formatter("%(message)s"))
handler2 = FileHandler(filename=log_file)
handler2.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
LOGGER = init_logger()
seed_torch(seed=CFG.seed)
################################################
# Data Loading #
################################################
traint = pd.read_csv("../input/train_tp.csv")
trainf = pd.read_csv("../input/train_fp.csv")
traint["istp"] = 1
trainf["istp"] = 0
test = pd.read_csv("../input/sample_submission.csv")
print(traint.shape, trainf.shape, test.shape)
traint_additional = pd.read_csv("../input/train_fp_additional_classwise_threshold_95_percentile.csv")
traint_additional["istp"] = 1
PERIOD = CFG.period
TIME = CFG.duration
SR = 48000
FMIN = 40
FMAX = SR // 2
IMAGE_WIDTH = 320
IMAGE_HEIGHT = 320
N_MELS = IMAGE_HEIGHT
HOP_SIZE = 512
WINDOW_SIZE = 512 * 4
# Find each species' overall f_min (minimum) and f_max (maximum)
species_fmin = traint.groupby("species_id")["f_min"].agg(min).reset_index()
species_fmax = traint.groupby("species_id")["f_max"].agg(max).reset_index()
species_fmin_fmax = pd.merge(species_fmin, species_fmax, on="species_id")
MEL_FREQ = librosa.mel_frequencies(fmin=FMIN, fmax=FMAX, n_mels=IMAGE_HEIGHT)
def search_bin(value):
n = 0
for i, v in enumerate(MEL_FREQ):
if v < value:
pass
else:
n = i - 1
break
return n
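# A vectorized equivalent for in-range frequencies (a sketch; note that the
# loop above returns 0 when value exceeds every mel bin centre, whereas
# searchsorted would give len(MEL_FREQ) - 1):
#     n = np.searchsorted(MEL_FREQ, value, side="left") - 1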
# Find the mel-spectrogram bin coordinates corresponding to each frequency
# https://akifukka.hatenablog.com/entry/text2speech2
species_fmin_fmax["f_min_mel"] = species_fmin_fmax["f_min"].map(search_bin)
species_fmin_fmax["f_max_mel"] = species_fmin_fmax["f_max"].map(search_bin)
# Merge the mel-bin info into the train tables
species_fmin_fmax["species_id"] = species_fmin_fmax["species_id"].astype(int)
traint["species_id"] = traint["species_id"].astype(int)
trainf["species_id"] = trainf["species_id"].astype(int)
traint_additional["specids_id"] = traint_additional["species_id"].astype(int)
species_fmin_fmax.drop(["f_min", "f_max"], inplace=True, axis=1)
traint = pd.merge(traint, species_fmin_fmax, on="species_id", how="left")
trainf = pd.merge(trainf, species_fmin_fmax, on="species_id", how="left")
traint_additional = pd.merge(traint_additional, species_fmin_fmax, on="species_id", how="left")
# Concatenate the TP and FP annotations
train_df = pd.concat([traint, trainf], axis=0).reset_index()
print(train_df.shape)
################################################
# CV split #
################################################
train_gby = train_df.groupby("recording_id")[["species_id"]].first().reset_index()
train_gby = train_gby.sample(frac=1, random_state=CFG.seed).reset_index(drop=True)
train_gby.loc[:, "kfold"] = -1
X = train_gby["recording_id"].values
y = train_gby["species_id"].values
kfold = StratifiedKFold(n_splits=CFG.n_fold)
for fold, (t_idx, v_idx) in enumerate(kfold.split(X, y)):
train_gby.loc[v_idx, "kfold"] = fold
train_df = train_df.merge(train_gby[["recording_id", "kfold"]], on="recording_id", how="left")
traint_additional = traint_additional.merge(train_gby[["recording_id", "kfold"]], on="recording_id", how="left")
print(train_df.kfold.value_counts())
train_df.to_csv(OUTPUT_DIR / "folds.csv", index=False)
traint_additional.to_csv(OUTPUT_DIR / "folds_additional.csv", index=False)
species_fmin_fmax.to_csv(OUTPUT_DIR / "species_fmin_fmax.csv", index=False)
################################################
# audiomentations #
################################################
augmenter = A.Compose([
A.AddGaussianNoise(min_amplitude=0.01, max_amplitude=0.03, p=0.2),
A.PitchShift(min_semitones=-3, max_semitones=3, p=0.2),
A.Gain(p=0.2)
])
################################################
# Dataset #
################################################
def cut_spect(spect: torch.Tensor, fmin_mel: int, fmax_mel: int):
return spect[fmin_mel:fmax_mel]
def do_normalize(img: torch.Tensor):
bs, ch, w, h = img.shape
_img = img.clone()
_img = _img.view(bs, -1)
_img -= _img.min(1, keepdim=True)[0]
_img /= _img.max(1, keepdim=True)[0]
_img = _img.view(bs, ch, w, h) * 255
return _img
class AudioDataset(Dataset):
def __init__(self, df, period=PERIOD, time=TIME,
transforms=None, data_path="../input/rfcx-species-audio-detection/train"):
dfgby = df.groupby("recording_id").agg(lambda x: list(x)).reset_index()
self.period = period
self.transforms = transforms
self.data_path = data_path
self.time = time
self.recording_ids = dfgby["recording_id"].values
self.species_ids = dfgby["species_id"].values
self.t_mins = dfgby["t_min"].values
self.t_maxs = dfgby["t_max"].values
self.f_mins = dfgby["f_min"].values
self.f_maxs = dfgby["f_max"].values
self.f_min_mels = dfgby["f_min_mel"].values
self.f_max_mels = dfgby["f_max_mel"].values
self.istps = dfgby["istp"].values
def __len__(self):
return len(self.recording_ids)
def __getitem__(self, idx):
recording_id = self.recording_ids[idx]
species_id = self.species_ids[idx]
istp = self.istps[idx]
t_min, t_max = self.t_mins[idx], self.t_maxs[idx]
f_min, f_max = self.f_mins[idx], self.f_maxs[idx]
f_min_mel, f_max_mel = self.f_min_mels[idx], self.f_max_mels[idx]
        # Load the audio
y, sr = sf.read(f"{self.data_path}/{recording_id}.flac")
        len_y = len(y)  # total number of frames
        # frames needed = sampling rate (frames/sec) * crop duration (sec)
effective_length = sr * self.time
rint = np.random.randint(len(t_min))
        # convert t_min / t_max from seconds to frame indices
tmin, tmax = round(sr * t_min[rint]), round(sr * t_max[rint])
cut_min = max(0, min(tmin - (effective_length - (tmax-tmin)) // 2,
min(tmax + (effective_length - (tmax-tmin)) // 2,
len_y) - effective_length))
extra = tmax+(effective_length - (tmax-tmin))//2 - len_y
lack = tmin - (effective_length - (tmax-tmin)) // 2
start = cut_min + np.random.randint(0, (self.time-self.period)*sr)
if extra > 0:
start = np.random.randint(tmax-(tmax-tmin)//2-self.period*sr, len_y-self.period*sr)
if lack < 0:
start = cut_min + np.random.randint(0, tmin)
end = start + self.period * sr
y = y[start:end]
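        # Cropping strategy: a `time`-second window is first centred on the
        # sampled annotation (clamped to the clip), then a random
        # `period`-second crop is drawn inside it; the `extra`/`lack` branches
        # re-draw the start when the annotation sits too close to a clip edge.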
if self.transforms:
            # waveform augmentations (Gaussian noise, pitch shift, gain)
y = self.transforms(samples=y, sample_rate=sr)
        # convert start (frames) -> time (sec);
        # start_time is the left edge of the effective window
start_time = start / sr
end_time = end / sr
label = np.zeros(24, dtype='f')
new_tmins = []
new_tmaxs = []
new_fmins = []
new_fmaxs = []
new_sids = []
new_istp = []
for i in range(len(t_min)):
            # since one of several annotations was sampled, TP and FP spans may overlap inside the crop
if (t_min[i] >= start_time) & (t_max[i] <= end_time):
if f_min_mel[rint] <= (f_min_mel[i]+f_max_mel[i])/2 <= f_max_mel[rint]:
if label[species_id[i]] == 0:
label[species_id[i]] = 1 * istp[i]
new_tmins.append(t_min[i]-start_time)
new_tmaxs.append(t_max[i]-start_time)
new_fmins.append(f_min[i])
new_fmaxs.append(f_max[i])
new_sids.append(species_id[i])
new_istp.append(istp[i])
            elif start_time <= ((t_min[i] + t_max[i]) / 2) <= end_time:  # the bbox centre lies inside the effective window
if f_min_mel[rint] <= (f_min_mel[i]+f_max_mel[i])/2 <= f_max_mel[rint]:
if label[species_id[i]] == 0:
label[species_id[i]] = 1 * istp[i]
new_tmin = 0
new_tmax = 0
if t_min[i] - start_time < 0:
new_tmin = 0
else:
new_tmin = t_min[i] - start_time
if t_max[i] - start_time < 0:
new_tmax = 0
elif t_max[i] > end_time:
new_tmax = end_time - start_time
else:
new_tmax = t_max[i] - start_time
new_tmins.append(new_tmin)
new_tmaxs.append(new_tmax)
new_fmins.append(f_min[i])
new_fmaxs.append(f_max[i])
new_sids.append(species_id[i])
new_istp.append(istp[i])
return {
"wav": torch.tensor(y, dtype=torch.float),
"target": torch.tensor(label, dtype=torch.float),
"id": recording_id,
"f_min_mel": f_min_mel[rint],
"f_max_mel": f_max_mel[rint],
}
class ValidDataset(Dataset):
def __init__(self, df, period=PERIOD, transforms=None, data_path="../input/rfcx-species-audio-detection/train"):
dfgby = df.groupby("recording_id").agg(lambda x: list(x)).reset_index()
self.period = period
self.transforms = transforms
self.data_path = data_path
self.recording_ids = dfgby["recording_id"].values
self.species_ids = dfgby["species_id"].values
self.t_mins = dfgby["t_min"].values
self.t_maxs = dfgby["t_max"].values
self.f_mins = dfgby["f_min"].values
self.f_maxs = dfgby["f_max"].values
self.f_min_mels = dfgby["f_min_mel"].values
self.f_max_mels = dfgby["f_max_mel"].values
self.istps = dfgby["istp"].values
def __len__(self):
return len(self.recording_ids)
def __getitem__(self, idx):
recording_id = self.recording_ids[idx]
species_id = self.species_ids[idx]
istp = self.istps[idx]
t_min, t_max = self.t_mins[idx], self.t_maxs[idx]
f_min, f_max = self.f_mins[idx], self.f_maxs[idx]
f_min_mel, f_max_mel = self.f_min_mels[idx], self.f_max_mels[idx]
rint = np.random.randint(len(t_min))
        # Load the audio
y, sr = sf.read(f"{self.data_path}/{recording_id}.flac")
        # convert t_min / t_max from seconds to frame indices
tmin, tmax = round(sr * t_min[rint]), round(sr * t_max[rint])
        len_y = len(y)  # total number of frames
        # frames needed = sampling rate (frames/sec) * period (sec)
effective_length = sr * self.period # 6 sec
start = 0
start = max(0, min(tmin - (effective_length - (tmax-tmin)) // 2,
min(tmax + (effective_length - (tmax-tmin)) // 2,
len_y) - effective_length))
end = start + effective_length
y = y[start:end]
start_time = start / sr
end_time = end / sr
label = np.zeros(24, dtype='f')
new_tmins = []
new_tmaxs = []
new_fmins = []
new_fmaxs = []
new_sids = []
new_istp = []
for i in range(len(t_min)):
            # since one of several annotations was sampled, TP and FP spans may overlap inside the crop
if (t_min[i] >= start_time) & (t_max[i] <= end_time):
if f_min_mel[rint] <= (f_min_mel[i]+f_max_mel[i])/2 <= f_max_mel[rint]:
if label[species_id[i]] == 0:
label[species_id[i]] = 1 * istp[i]
new_tmins.append(t_min[i]-start_time)
new_tmaxs.append(t_max[i]-start_time)
new_fmins.append(f_min[i])
new_fmaxs.append(f_max[i])
new_sids.append(species_id[i])
new_istp.append(istp[i])
            elif start_time <= ((t_min[i] + t_max[i]) / 2) <= end_time:  # the bbox centre lies inside the effective window
if f_min_mel[rint] <= (f_min_mel[i]+f_max_mel[i])/2 <= f_max_mel[rint]:
if label[species_id[i]] == 0:
label[species_id[i]] = 1 * istp[i]
new_tmin = 0
new_tmax = 0
if t_min[i] - start_time < 0:
new_tmin = 0
else:
new_tmin = t_min[i] - start_time
if t_max[i] - start_time < 0:
new_tmax = 0
elif t_max[i] > end_time:
new_tmax = end_time - start_time
else:
new_tmax = t_max[i] - start_time
new_tmins.append(new_tmin)
new_tmaxs.append(new_tmax)
new_fmins.append(f_min[i])
new_fmaxs.append(f_max[i])
new_sids.append(species_id[i])
new_istp.append(istp[i])
return {
"wav": torch.tensor(y, dtype=torch.float),
"target": torch.tensor(label, dtype=torch.float),
"id": recording_id,
"f_min_mel": f_min_mel[rint],
"f_max_mel": f_max_mel[rint],
}
class TestDataset(Dataset):
def __init__(self, df, period=PERIOD, transforms=None, data_path="../input/rfcx-species-audio-detection/test"):
self.period = period
self.transforms = transforms
self.data_path = data_path
self.recording_ids = df["recording_id"].values
def __len__(self):
return len(self.recording_ids)
def __getitem__(self, idx):
recording_id = self.recording_ids[idx]
y, sr = sf.read(f"{self.data_path}/{recording_id}.flac")
len_y = len(y)
        # convert the period (sec) to frames
effective_length = sr * self.period
y_ = []
i = 0
while i < len_y:
            # walk the whole clip in non-overlapping effective_length hops
y__ = y[i:i+effective_length]
if effective_length > len(y__):
break
else:
y_.append(y__)
i = i + int(effective_length)
        y = np.stack(y_)  # (num_clips, effective_length)
label = np.zeros(24, dtype='f')
# y: clip nums, seq -> clip_nums, width, height
return {
"wav": torch.tensor(y, dtype=torch.float),
"target": torch.tensor(label, dtype=torch.float),
"id": recording_id,
}
################################################
# Model #
################################################
def init_layer(layer):
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, "bias"):
if layer.bias is not None:
layer.bias.data.fill_(0.)
class AudioClassifier(nn.Module):
def __init__(self, model_name, n_out):
super(AudioClassifier, self).__init__()
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=80, time_stripes_num=2,
freq_drop_width=16, freq_stripes_num=2)
self.net = timm.create_model(model_name, pretrained=True, in_chans=1)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout1 = nn.Dropout(0.3)
self.dropout2 = nn.Dropout(0.3)
n_features = self.net.classifier.in_features
self.net_classifier = nn.Linear(n_features, n_out)
self.init_weight()
        # note: kornia's random crop expects (h, w) ordering
self.transform = nn.Sequential(K.RandomHorizontalFlip(p=0.1),
# K.GaussianBlur(7, p=0.5),
# K.RandomCrop((round(IMAGE_HEIGHT*0.7), round(IMAGE_WIDTH*0.7)),p=0.3)
)
def init_weight(self):
init_layer(self.net_classifier)
def forward(self, x, f_min_mels, f_max_mels, train=True, test=False): # x: (bs, 1, w, h)
        # crop the spectrogram to each sample's [f_min_mel, f_max_mel] band
bs, ch, w, h = x.shape
x = x.reshape(bs*w, -1)
bsw = bs*w
spects = []
fi = 0
if test:
for ii, i in enumerate(range(bsw)[::w]):
spect = x[i:i+w] # torch (w, h)
for f_min, f_max in zip(f_min_mels, f_max_mels):
_spect = cut_spect(spect.transpose(0, 1), f_min, f_max).transpose(0, 1) # out:torch (w, h)
                    # resize to the model input size
_spect = torch.unsqueeze(_spect, 0)
_spect = torch.unsqueeze(_spect, 0) # torch(1,1,w,h)
_spect = F.interpolate(_spect, (IMAGE_WIDTH, IMAGE_HEIGHT),
mode='bilinear',
align_corners=False) # out: torch (1, 1, w, h)
_spect = torch.squeeze(_spect, 0) # out: torch (1, w, h)
spects.append(_spect)
x = torch.stack(spects) # torch (bs, 1, w, h) bs=24*bs*10
else:
for ii, i in enumerate(range(bsw)[::w]):
spect = x[i:i+w] # torch (w, h)
f_min = f_min_mels[fi]
f_max = f_max_mels[fi]
spect = cut_spect(spect.transpose(0, 1), f_min, f_max).transpose(0, 1) # out:torch (w, h)
                # resize to the model input size
spect = torch.unsqueeze(spect, 0)
spect = torch.unsqueeze(spect, 0) # torch(1,1,w,h)
spect = F.interpolate(spect, (IMAGE_WIDTH, IMAGE_HEIGHT),
mode='bilinear',
align_corners=False) # out: torch (1, 1, w, h)
if train:
spect = self.transform(spect.transpose(2, 3)) # out: torch(1,1,h,w)
spect = spect.transpose(2, 3) # out: torch(1,1,w,h)
spect = torch.squeeze(spect, 0) # torch (1, w, h)
spects.append(spect)
fi += 1
x = torch.stack(spects) # torch (bs, 1, w, h)
x = do_normalize(x)
if train:
x = self.spec_augmenter(x)
        # x = x.expand(x.shape[0], 3, x.shape[2], x.shape[3])  # replicate across channels
# Output shape (batch size, channels, time, frequency)
x = self.net.forward_features(x)
x = self.avg_pool(x).flatten(1)
x = self.dropout1(x)
x = self.net_classifier(x)
return x
################################################
# Loss #
################################################
def f1_loss(y_true, y_pred, is_training=False, epsilon=1e-7) -> torch.Tensor:
'''
Calculate F1 score. Can work with gpu tensors
The original implmentation is written by Michal Haltuf on Kaggle.
Returns
-------
torch.Tensor
`ndim` == 1. 0 <= val <= 1
Reference
---------
- https://www.kaggle.com/rejpalcz/best-loss-function-for-f1-score-metric
- https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score
- https://discuss.pytorch.org/t/calculating-precision-recall-and-f1-score-in-case-of-multi-label-classification/28265/6
'''
y_pred = y_pred > 0.5
tp = (y_true * y_pred).sum()
fp = ((1 - y_true) * y_pred).sum()
fn = (y_true * (1 - y_pred)).sum()
precision = tp / (tp + fp + epsilon)
recall = tp / (tp + fn + epsilon)
f1 = 2 * (precision*recall) / (precision + recall + epsilon)
return f1
# https://www.kaggle.com/c/rfcx-species-audio-detection/discussion/213075
class BCEFocalLoss(nn.Module):
def __init__(self, alpha=0.25, gamma=2.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, preds, targets):
bce_loss = nn.BCEWithLogitsLoss(reduction='none')(preds, targets)
probas = torch.sigmoid(preds)
loss = targets * self.alpha * (1. - probas)**self.gamma * bce_loss + (1. - targets) * probas**self.gamma * bce_loss
loss = loss.mean()
return loss
################################################
# Training helper functions #
################################################
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class MetricMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.y_true = []
self.y_pred = []
def update(self, y_true, y_pred):
self.y_true.extend(y_true.cpu().detach().numpy().tolist())
self.y_pred.extend(torch.sigmoid(y_pred).cpu().detach().numpy().tolist())
@property
def avg(self):
self.f1score = f1_loss(np.array(self.y_true), np.array(self.y_pred))
score_class, weight = lwlrap(np.array(self.y_true), np.array(self.y_pred))
self.score = (score_class * weight).sum()
return {
"lwlrap": self.score,
"f1score": self.f1score
}
def get_mixup_indices(bs, f_min_mels, f_max_mels):
indices_matrix = np.zeros((bs, bs))
for img1_idx in range(bs):
for img2_idx in range(bs):
if img1_idx != img2_idx:
mix_flag = (f_min_mels[img2_idx] >= f_min_mels[img1_idx]) & (f_max_mels[img2_idx] <= f_max_mels[img1_idx])
if mix_flag:
indices_matrix[img1_idx, img2_idx] = 1
                        break  # stop as soon as one img2 whose band lies inside img1's band is found
indices = np.arange(bs)
indices_matrix_1 = np.where(indices_matrix == 1)
for i, j in zip(indices_matrix_1[0], indices_matrix_1[1]):
        indices[i] = j  # i < bs always holds here, so the original else-branch was dead code
return indices
def mixup(data, targets, f_min_mels, f_max_mels, alpha=1.0):
bs = data.size(0)
indices = get_mixup_indices(bs, f_min_mels, f_max_mels)
shuffled_data = data[indices]
shuffled_targets = targets[indices]
# lam = np.random.beta(alpha, alpha)
lam = 0.5
data = data * lam + shuffled_data * (1 - lam)
targets = targets * lam + shuffled_targets * (1 - lam)
return data, targets
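
# Toy example of the band-aware pairing (illustrative values, not from the
# source): with f_min_mels = [0, 10, 200] and f_max_mels = [300, 120, 260],
# sample 0 absorbs sample 1 (band 10..120 fits inside 0..300), while samples
# 1 and 2 find no partner, keep their own index, and are left unchanged
# because lam * x + (1 - lam) * x == x.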
def train_epoch(model, spectrogram_extractor, logmel_extractor, loader,
criterion, optimizer, scheduler, epoch, device, p_mixup,
normalize=True, resize=True, spec_aug=True):
losses = AverageMeter()
scores = MetricMeter()
model.train()
t = tqdm(loader)
for i, sample in enumerate(t):
x = sample['wav'].to(device) # (bs, seq)
target = sample['target'].to(device)
f_min_mels = sample["f_min_mel"]
f_max_mels = sample["f_max_mel"]
x = spectrogram_extractor(x) # (batch_size, 1, time_steps, freq_bins)
x = logmel_extractor(x)
if np.random.rand(1) < p_mixup:
# mixup
mix_x, mix_target = mixup(x, target, f_min_mels, f_max_mels)
output = model(mix_x, f_min_mels, f_max_mels, train=True)
loss = criterion(output, mix_target)
else:
output = model(x, f_min_mels, f_max_mels, train=True)
loss = criterion(output, target)
        if CFG.gradient_accumulation_steps > 1:
            loss = loss / CFG.gradient_accumulation_steps
        loss.backward()  # backward must run regardless of the accumulation setting
if (i + 1) % CFG.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
if CFG.step_scheduler:
scheduler.step()
bs = x.size(0)
scores.update(target, output)
losses.update(loss.item(), bs)
t.set_description(f"Train E:{epoch} - Loss{losses.avg:0.4f}")
t.close()
return scores.avg, losses.avg
def valid_epoch(model, spectrogram_extractor, logmel_extractor,
loader, criterion, epoch, device):
losses = AverageMeter()
scores = MetricMeter()
model.eval()
with torch.no_grad():
t = tqdm(loader)
for i, sample in enumerate(t):
x = sample['wav'].to(device) # (bs, seq)
target = sample['target'].to(device)
f_min_mels = sample["f_min_mel"]
f_max_mels = sample["f_max_mel"]
x = spectrogram_extractor(x) # (batch_size, 1, time_steps, freq_bins)
x = logmel_extractor(x)
bs = x.size(0)
output = model(x, f_min_mels, f_max_mels, train=False)
# output = output.reshape(bs, 24, -1) #(bs, 24, 24) batchsize,
# output, _ = torch.max(output, dim=1)
loss = criterion(output, target)
scores.update(target, output)
losses.update(loss.item(), bs)
t.set_description(f"Valid E:{epoch} - Loss:{losses.avg:0.4f}")
t.close()
return scores.avg, losses.avg
def test_epoch(model, spectrogram_extractor, logmel_extractor, loader,
f_min_mels, f_max_mels, device, normalize=True, resize=True):
model.eval()
pred_list = []
id_list = []
with torch.no_grad():
t = tqdm(loader)
for i, sample in enumerate(t):
x = sample["wav"].to(device)
bs, c, seq = x.shape
x = x.reshape(bs*c, seq)
x = spectrogram_extractor(x)
x = logmel_extractor(x)
id = sample["id"]
output = torch.sigmoid(model(x, f_min_mels, f_max_mels, train=False, test=True))
output = output.reshape(bs, c*24, -1)
output, _ = torch.max(output, dim=1)
output = output.cpu().detach().numpy().tolist()
pred_list.extend(output)
id_list.extend(id)
return pred_list, id_list
def get_valid_all_clip_result(fold: int):
# Load Data
train_df = pd.read_csv(OUTPUT_DIR / "folds.csv")
train_df = train_df[train_df["istp"] == 1].reset_index(drop=True)
species_fmin_fmax = pd.read_csv(OUTPUT_DIR / "species_fmin_fmax.csv")
f_min_mels = torch.tensor(species_fmin_fmax["f_min_mel"].values, dtype=torch.int)
f_max_mels = torch.tensor(species_fmin_fmax["f_max_mel"].values, dtype=torch.int)
# Load model
model = AudioClassifier(CFG.model_param["encoder"], CFG.model_param["classes_num"])
model.load_state_dict(torch.load(OUTPUT_DIR / f'fold-{fold}.bin'))
model = model.to(device)
# Get valid
valid_fold = train_df[train_df.kfold == fold].reset_index(drop=True)
test_dataset = TestDataset(
df=valid_fold,
period=CFG.period,
transforms=None,
data_path="../input/train",
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=CFG.batch_size//32,
shuffle=False,
drop_last=False,
num_workers=CFG.num_workers
)
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
spectrogram_extractor = Spectrogram(n_fft=WINDOW_SIZE, hop_length=HOP_SIZE,
win_length=WINDOW_SIZE, window=window,
center=center, pad_mode=pad_mode,
freeze_parameters=True).to(device)
logmel_extractor = LogmelFilterBank(sr=SR, n_fft=WINDOW_SIZE,
n_mels=N_MELS, fmin=FMIN, fmax=FMAX,
ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True).to(device)
test_pred, ids = test_epoch(model, spectrogram_extractor, logmel_extractor, test_loader,
f_min_mels, f_max_mels, device, resize=True)
test_pred_df = pd.DataFrame({
"recording_id": valid_fold.recording_id.values
})
test_pred_df["kfold"] = fold
for i in range(24):
test_pred_df[f"s{i}"] = 0
test_pred_df[[f's{i}' for i in range(24)]] = test_pred
return test_pred_df
def inference(fold: int):
# Load Data
sub_df = pd.read_csv("../input/sample_submission.csv")
species_fmin_fmax = pd.read_csv(OUTPUT_DIR / "species_fmin_fmax.csv")
f_min_mels = torch.tensor(species_fmin_fmax["f_min_mel"].values, dtype=torch.int)
f_max_mels = torch.tensor(species_fmin_fmax["f_max_mel"].values, dtype=torch.int)
# Load model
model = AudioClassifier(CFG.model_param["encoder"], CFG.model_param["classes_num"])
model.load_state_dict(torch.load(OUTPUT_DIR / f'fold-{fold}.bin'))
model = model.to(device)
# Get valid
test_dataset = TestDataset(
df=sub_df,
period=CFG.period,
transforms=None,
data_path="../input/test",
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=CFG.batch_size//32,
shuffle=False,
drop_last=False,
num_workers=CFG.num_workers
)
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
spectrogram_extractor = Spectrogram(n_fft=WINDOW_SIZE, hop_length=HOP_SIZE,
win_length=WINDOW_SIZE, window=window,
center=center, pad_mode=pad_mode,
freeze_parameters=True).to(device)
logmel_extractor = LogmelFilterBank(sr=SR, n_fft=WINDOW_SIZE,
n_mels=N_MELS, fmin=FMIN, fmax=FMAX,
ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True).to(device)
test_pred, ids = test_epoch(model, spectrogram_extractor, logmel_extractor, test_loader,
f_min_mels, f_max_mels, device, resize=True)
test_pred_df = pd.DataFrame({
"recording_id": sub_df.recording_id.values
})
test_pred_df["kfold"] = fold
for i in range(24):
test_pred_df[f"s{i}"] = 0
test_pred_df[[f's{i}' for i in range(24)]] = test_pred
return test_pred_df
################################################
# Training Loop #
################################################
def train_loop(fold):
LOGGER.info(f"========== fold: {fold} training ==========")
train_df = pd.read_csv(OUTPUT_DIR / 'folds.csv')
train_additional = pd.read_csv(OUTPUT_DIR / "folds_additional.csv")
if CFG.debug:
train_df = train_df.sample(n=1000, random_state=42)
train_fold = train_df[train_df.kfold != fold]
train_additional_fold = train_additional[train_additional.kfold != fold]
valid_fold = train_df[train_df.kfold == fold]
columns = [
"recording_id", "species_id", "t_min", "f_min", "t_max", "f_max",
"istp", "f_min_mel", "f_max_mel", "kfold"
]
train_fold = train_fold[columns]
print(f"train fold before concat: {train_fold.shape}")
train_additional_fold = train_additional_fold[columns]
train_fold = pd.concat([train_fold, train_additional_fold], axis=0).reset_index(drop=True)
print(f"train fold after concat: {train_fold.shape}")
train_dataset = AudioDataset(
df=train_fold,
period=CFG.period,
time=CFG.duration,
transforms=augmenter,
data_path="../input/train",
)
valid_dataset = ValidDataset(
df=valid_fold,
period=CFG.period,
transforms=None,
data_path="../input/train"
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=CFG.batch_size,
shuffle=True,
drop_last=True,
num_workers=CFG.num_workers,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=CFG.batch_size//4,
shuffle=False,
drop_last=False,
num_workers=CFG.num_workers
)
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
spectrogram_extractor = Spectrogram(n_fft=WINDOW_SIZE, hop_length=HOP_SIZE,
win_length=WINDOW_SIZE, window=window,
center=center, pad_mode=pad_mode,
freeze_parameters=True).to(device)
logmel_extractor = LogmelFilterBank(sr=SR, n_fft=WINDOW_SIZE,
n_mels=N_MELS, fmin=FMIN, fmax=FMAX,
ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True).to(device)
# ====================================================
# scheduler
# ====================================================
def get_scheduler(optimizer):
if CFG.scheduler == 'ReduceLROnPlateau':
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=CFG.factor, patience=CFG.patience, verbose=True, eps=CFG.min_lr)
elif CFG.scheduler == 'CosineAnnealingLR':
scheduler = CosineAnnealingLR(optimizer, T_max=CFG.T_max, eta_min=CFG.min_lr, last_epoch=-1)
elif CFG.scheduler == 'CosineAnnealingWarmRestarts':
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=CFG.T_0, T_mult=1, eta_min=CFG.min_lr, last_epoch=-1)
return scheduler
# ====================================================
# model & optimizer
# ====================================================
model = AudioClassifier(CFG.model_param["encoder"], CFG.model_param["classes_num"])
model = model.to(device)
# optimizer = Adam(model.parameters(), lr=CFG.lr, weight_decay=CFG.weight_decay, amsgrad=False)
# scheduler = get_scheduler(optimizer)
optimizer = torch.optim.AdamW(model.parameters(), lr=CFG.lr)
num_train_steps = int(len(train_loader) * CFG.epochs)
num_warmup_steps = int(0.1 * CFG.epochs * len(train_loader))
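    # Warm up the learning rate linearly over the first 10% of training
    # steps, then decay it linearly to zero over the remainder.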
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps)
# criterion = nn.BCEWithLogitsLoss()
criterion = BCEFocalLoss()
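    # Focal-style BCE (defined earlier in this script) down-weights easy
    # examples, which suits the sparse multi-label species targets better
    # than plain BCEWithLogitsLoss.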
best_score = -np.inf
for epoch in range(CFG.epochs):
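        # Mixup is applied with probability p_mixup only during the first
        # CFG.mixup_epochs epochs, then switched off.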
if epoch < CFG.mixup_epochs:
p_mixup = CFG.p_mixup
else:
p_mixup = 0.
start_time = time.time()
# train
train_avg, train_loss = train_epoch(model, spectrogram_extractor, logmel_extractor, train_loader,
criterion, optimizer, scheduler, epoch, device, p_mixup, spec_aug=True)
# valid
valid_avg, valid_loss = valid_epoch(model, spectrogram_extractor, logmel_extractor, valid_loader,
criterion, epoch, device)
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(valid_loss)
elif isinstance(scheduler, CosineAnnealingLR):
scheduler.step()
elif isinstance(scheduler, CosineAnnealingWarmRestarts):
scheduler.step()
elapsed = time.time() - start_time
LOGGER.info(f'Epoch {epoch+1} - avg_train_loss: {train_loss:.5f} avg_val_loss: {valid_loss:.5f} time: {elapsed:.0f}s')
LOGGER.info(f"Epoch {epoch+1} - train_LWLRAP:{train_avg['lwlrap']:0.5f} valid_LWLRAP:{valid_avg['lwlrap']:0.5f}")
LOGGER.info(f"Epoch {epoch+1} - train_F1:{train_avg['f1score']:0.5f} valid_F1:{valid_avg['f1score']:0.5f}")
if valid_avg['f1score'] > best_score:
LOGGER.info(f">>>>>>>> Model Improved From {best_score} ----> {valid_avg['f1score']}")
torch.save(model.state_dict(), OUTPUT_DIR / f'fold-{fold}.bin')
best_score = valid_avg['f1score']
def get_master_df():
df = pd.read_csv("../input/train_tp.csv").sort_values("recording_id")
df['species_ids'] = df['species_id'].astype(str)
label_dict = {}
for recording_id, tmp in df.groupby(['recording_id']):
label_dict[recording_id] = ' '.join(sorted(set(tmp['species_ids'].values)))
output = pd.DataFrame({'recording_id': df['recording_id'].unique()})
output['species_ids'] = output['recording_id'].map(label_dict)
y_true = np.zeros((len(output), 24))
for i, species in enumerate(output['species_ids'].values):
for s in species.split():
y_true[i, int(s)] = 1
for i in range(24):
output[f"true_s{i}"] = 0
output[[f'true_s{i}' for i in range(24)]] = y_true
return output.reset_index(drop=True)
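# Scores out-of-fold predictions: rebuilds one-hot labels from the
# space-separated species ids and reports LWLRAP via get_score().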
def get_result(oof_df):
y_true = np.zeros((len(oof_df), 24))
for i, species in enumerate(oof_df['species_ids'].values):
for s in species.split():
y_true[i, int(s)] = 1
preds = oof_df[[f's{i}' for i in range(24)]].values
score = get_score(y_true, preds)
LOGGER.info(f'LWLRAP Score: {score:<.5f}')
################################################
# Main #
################################################
def main():
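    # Driver: train each selected fold, collect OOF predictions for a CV
    # score, then average the per-fold test predictions into the submission.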
if CFG.train:
master_df = get_master_df()
# train
oof_df = pd.DataFrame()
for fold in range(CFG.n_fold):
if fold in CFG.trn_fold:
train_loop(fold)
_oof_df = get_valid_all_clip_result(fold)
_oof_df = _oof_df.merge(master_df, on='recording_id', how='left')
oof_df = pd.concat([oof_df, _oof_df])
LOGGER.info(f"========== fold: {fold} result ==========")
get_result(_oof_df)
# CV result
LOGGER.info("========== CV ==========")
get_result(oof_df)
# save result
oof_df.to_csv(OUTPUT_DIR / 'oof_df.csv', index=False)
if CFG.inference:
# inference
LOGGER.info("========== inference ==========")
submission = pd.DataFrame()
for fold in range(CFG.n_fold):
if fold in CFG.trn_fold:
sub = inference(fold)
submission = pd.concat([submission, sub])
print(f'raw_submission: {submission.shape}')
submission.to_csv(OUTPUT_DIR / "raw_submission.csv", index=False)
sub = submission.groupby("recording_id", as_index=False).mean()
output_cols = ['recording_id'] + [f's{i}' for i in range(24)]
        print(f'submission: {sub.shape}')
sub[output_cols].to_csv(OUTPUT_DIR / "submission.csv", index=False)
LOGGER.info("========== submission ==========")
LOGGER.info(sub[output_cols].head())
if __name__ == '__main__':
main()
# ===== /jumpingintodjango/questionsandanswers/migrations/0005_auto__add_field_question_cmpnyvisit.py (repo: gzpgg3x/BrowsingOR) =====
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Question.cmpnyvisit'
db.add_column(u'questionsandanswers_question', 'cmpnyvisit',
self.gf('django.db.models.fields.IntegerField')(default=100),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Question.cmpnyvisit'
db.delete_column(u'questionsandanswers_question', 'cmpnyvisit')
models = {
u'questionsandanswers.answer': {
'Meta': {'object_name': 'Answer'},
'best_answer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionsandanswers.Question']"})
},
u'questionsandanswers.question': {
'Meta': {'object_name': 'Question'},
'cmpnyvisit': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visit': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
    complete_apps = ['questionsandanswers']
# ===== /convert/convert_es/convert.py (repo: christinabotros/gis-book) =====
#!/usr/bin/python
# -*- coding: cp1252 -*-
import re
import os.path
import sys
import fnmatch
import glob
import traceback
import json
import codecs
import time
import shutil
import zipfile
tableshtml={
"Tabla:PropiedadesVariablesVisuales": r'<table border="1"><col width="11%" /><col width="13%" /><col width="13%" /><col width="13%" /><col width="13%" /><col width="13%" /><col width="13%" /><col width="13%" /></colgroup><thead valign="bottom"><tr class="row-odd"><th class="head">Propiedad</th><th class="head">Posición</th><th class="head">Tamaño</th><th class="head">Forma</th><th class="head">Valor</th><th class="head">Tono</th><th class="head">Textura</th><th class="head">Orientación</th></tr></thead><tbody valign="top"><tr class="row-even"><td>Asociativa</td><td>◊</td><td>—</td><td>◊</td><td>—</td><td>◊</td><td>◊</td><td>◊</td></tr><tr class="row-odd"><td>Selectiva</td><td>◊</td><td>◊</td><td>—</td><td>◊</td><td>◊</td><td>◊</td><td>◊</td></tr><tr class="row-even"><td>Ordenada</td><td>◊</td><td>◊</td><td>—</td><td>◊</td><td>—</td><td>—</td><td>—</td></tr><tr class="row-odd"><td>Cuantitativa</td><td>◊</td><td>◊</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td></tr></tbody></table>'
}
exps_pre = [(r"\\bigskip", ""),
(r"\\ldots", "…"),
#(r"[\r\n]{2,}",r"<br><br>"),
(r"\\centering", ""),
(r"\\par[^t]", ""),
(r"\\degree", "°"),
(r"\\noindent", ""),
(r"\\vspace\{.*?\}", ""),
(r"\\begin\{center\}", ""),
(r"\\end\{center\}", ""),
(r"\\small", ""),
(r"\\emph\{(.*?)\}", r"<i>\1</i>"),
(r"\$(.*?)\$", r"<i>\1</i>")]
exps_post = [(r"\\index\{.*?\}", ""),
(r"\\pagestyle\{.*?\}",r""),
(r"\\%", "%"),
(r"\\_", "_"),
(r"\\underline\{(.*?)\}", r"<u>\1</u>"),
(r"\\footnote\{[\s\S]*?\}", ""),
(r"\\begin\{itemize\}", "<ul>"),
(r"\\end\{itemize\}", "</ul>"),
(r"\\begin\{enumerate\}", "<ol>"),
(r"\\end\{enumerate\}", "</ol>"),
(r"\\item", "<li>"),
(r"\\subitem", ""),
(r"\\texttt\{(.*?)\}", r"<tt>\1</tt>"),
(r"\\textbf\{(.*?)\}", r"<b>\1</b>"),
(r"\\chapter.*?\{(.*?)\}", r'<h1 id="\1">\1</h1>'),
(r"\\section.*?\{(.*?)\}", r'<h2 id="\1">\1</h2>'),
(r"\\subsection.*?\{(.*?)\}", r'<h3 id="\1">\1</h3>'),
(r"\\subsubsection.*?\{(.*?)\}", r'<h4 id="\1">\1</h4>'),
(r"---", "—"),
(">>", "»"),
("<<", "«"),
(r"([\s\S]*?)[\r\n]{2,}", r"<p>\1</p>"),
(r"<p><h",r"<h"),
(r"(</h.>)</p>",r"\1"),
(r"<p><pre>", r"<pre>"),
(r"</pre></p>", r"</pre>"),
(r"><br><br>",r"><br>"),
]
def template():
path = os.path.join(os.path.dirname(__file__), "html", "template.html")
with open(path) as f:
s = f.read()
return s
def convertFile(path, chapterNum):
name = os.path.splitext(os.path.basename(path))[0]
with open(path) as f:
s = f.read()
for exp, replace in exps_pre:
p = re.compile(exp)
s = p.sub(replace, s)
p = re.compile(r"\\begin\{figure\}[\s\S]*?\\end\{figure\}?")
imgs = p.findall(s)
for i, img in enumerate(imgs):
f = re.search(r"\\includegraphics.*?\{(.*?)\}", img).groups()[0]
path, ext = os.path.splitext(f)
if ext == ".pdf":
ext = ".png"
path = os.path.basename(path)
size = img[img.find("["):img.rfind("]")]
size = "".join([d for d in size if d in "0123456789."])
try:
size = float(size) * 100
except:
size = 100
caption = re.search(r"\\caption\{(.*?)\}", img).groups()[0]
label = re.search(r"\\label\{(.*?)\}", img).groups()[0]
figNum = "%i.%i" % (chapterNum, i + 1)
s = s.replace(img, (r"<a name='%s'></a><center><figure><img src='img/%s%s' width='%s%%'/>"
"<br><figcaption>Figura %s: %s</figcaption></figure></center>" % (label, path, ext, str(size), figNum, caption)))
s = s.replace("\\ref{%s}" % label, '<a href="#%s">%s</a>' % (label, figNum))
p = re.compile(r"(\\begin\{table[\S\s]*?\\end\{table.*?\})")
tables = p.findall(s)
for tableNum, table in enumerate(tables):
idx = table.find("Tabla:")
tablelabel = table[idx:table.find("}", idx)]
idx = table.find(r"\caption") + 9
caption = table[idx:table.find("}\n", idx)]
try:
replace = "<a name='%s'></a>%s<center><figcaption>Cuadro %s: %s</figcaption></center>" % (tablelabel, tableshtml[tablelabel], tableNum + 1, caption)
s = s.replace(table, replace)
s = s.replace("\\ref{%s}" % tablelabel, '<a href="#%s">%s</a>' % (label, tableNum))
except Exception, e:
pass
for exp, replace in exps_post:
p = re.compile(exp)
s = p.sub(replace, s)
html = template().replace("[BODY]", s)
with open(os.path.join(os.path.dirname(__file__), "html", name + ".html"), "w") as f:
f.write(html.decode('iso-8859-1').encode('utf8'))
return s
def convert():
src = os.path.join(os.path.dirname(__file__), "img")
dst = os.path.join(os.path.dirname(__file__), "html", "img")
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
chapterFiles = [os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "latex/es/prologo.tex")]
chapterNames = ["Introduccion", "Historia", "Fundamentos_cartograficos", "Datos",
"Fuentes_datos", "Software", "Bases_datos", "Analisis", "Visualizacion"]
chapterFiles.extend([os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
"latex/es/%s/%s.tex" % (n,n)) for n in chapterNames])
chapters = []
for i, f in enumerate(chapterFiles):
try:
chapter = convertFile(f, i + 1)
chapters.append(chapter)
except Exception, e:
traceback.print_exc()
pass
epub = zipfile.ZipFile(os.path.join(os.path.dirname(__file__), "ebook", "librosig.epub"), 'w')
epub.writestr("mimetype", "application/epub+zip")
epub.writestr("META-INF/container.xml", '''<container version="1.0"
xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
<rootfiles>
<rootfile full-path="OEBPS/Content.opf" media-type="application/oebps-package+xml"/>
</rootfiles>
</container>''');
index = '''<package version="2.0"
xmlns="http://www.idpf.org/2007/opf">
<metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
<dc:title>Introducción a los SIG</dc:title>
<dc:creator opf:file-as="Olaya, Víctor" opf:role="aut">Víctor Olaya</dc:creator>
<dc:language>es</dc:language>
<dc:description>Introducción a los Sistemas de Información Geográfica.</dc:description>
<meta name="cover" content="cover.jpg" />
</metadata>
<manifest>
<item id="cover" href="cover.html" media-type="application/xhtml+xml"/>
<item id="cover-image" href="cover.jpg" media-type="image/jpeg"/>
<item id="intro" href="intro.html" media-type="application/xhtml+xml"/>
%(manifest)s
</manifest>
<spine toc="ncx">
<itemref idref="cover" linear="no"/>
<itemref idref="intro" />
%(spine)s
</spine>
</package>'''
cover = '''<html>
<head>
<title>Cover</title>
<style type="text/css"> img { max-width: 100%; } </style>
</head>
<body>
<div id="cover-image">
<img src="cover.jpg" alt="Introducción a los SIG"/>
</div>
</body>
</html>'''
intro = '''<html>
<head>
<title>Introducción a los SIG</title>
</head>
<body>
<a name="start">
<h2>Introducción a los SIG</h2></a> </p>
<p>Copyright © Víctor Olaya. 2016</p>
<p>Versión del %s</p>
</body>
</html>'''
chapterTemplate = '''<html>
<head>
<title></title>
<link href="base.css" rel="stylesheet" type="text/css" />
</head>
<body>
%s
</body>
</html>'''
manifest = ""
spine = ""
import locale
try:
locale.setlocale(locale.LC_TIME, "esn")
except:
pass
epub.write(os.path.join(os.path.dirname(__file__), "ebook", "base.css"), os.path.join('OEBPS', "base.css"))
epub.write(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "covers", "ebook_es.jpg"), os.path.join('OEBPS', "cover.jpg"))
epub.writestr('OEBPS/intro.html', (intro % (time.strftime("%x"))).decode('iso-8859-1').encode('utf8'))
    epub.writestr('OEBPS/cover.html', cover.decode('iso-8859-1').encode('utf8'))  # write the cover template, not the intro
for i, html in enumerate(chapters):
manifest += '<item id="file_%s" href="%s.html" media-type="application/xhtml+xml"/>' % (
i+1, i+1)
spine += '<itemref idref="file_%s" />' % (i+1)
chapterText = chapterTemplate % html.replace("img/", "")
epub.writestr('OEBPS/%i.html' % (i+1), chapterText.decode('iso-8859-1').encode('utf8'))
for f in os.listdir(src):
fn = os.path.join(src, f)
epub.write(fn, os.path.join('OEBPS', f))
epub.writestr('OEBPS/Content.opf', (index % {
'manifest': manifest,
'spine': spine,
}).decode('iso-8859-1').encode('utf8'))
###############################
def findFiles(directory, pattern):
for root, dirs, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def convertImages():
for f in findFiles('../latex', '*.pdf'):
from subprocess import call
dest = os.path.basename(f)
dest = os.path.splitext(dest)[0]
dest = "img/%s.png" % dest
commands = [r'"Inkscape.exe"', "--export-png=" + dest, f]
print " ".join(commands)
#call(commands)
###############################
if __name__ == '__main__':
convert()
# ===== /SortingAlrorithms/QuickSort.py (repo: Ronak912/Programming_Fun) =====
def quickSort(alist):
quickSortHelper(alist, 0, len(alist)-1)
def quickSortHelper(alist, first, last):
if first >= last:
return
splitpoint = partition(alist, first, last)
quickSortHelper(alist, first, splitpoint-1)
quickSortHelper(alist, splitpoint+1, last)
def partition(alist, first, last):
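    # Hoare-style partition: the first element is the pivot; leftmark and
    # rightmark sweep towards each other, swapping out-of-place pairs, and
    # the pivot is finally swapped into place at rightmark.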
pivotvalue = alist[first]
leftmark = first+1
rightmark = last
while True:
while leftmark <= rightmark and alist[leftmark] <= pivotvalue:
leftmark += 1
while alist[rightmark] >= pivotvalue and rightmark >= leftmark:
rightmark -= 1
if rightmark < leftmark:
break
        else:
            # tuple assignment swaps the two items without a temp variable
            alist[leftmark], alist[rightmark] = alist[rightmark], alist[leftmark]
    # move the pivot into its final sorted position
    alist[first], alist[rightmark] = alist[rightmark], alist[first]
return rightmark
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
quickSort(alist)
# print(alist)
# ===== /w9/G4/4.py (repo: bobur554396/WD2020Spring) =====
# Functions
def hello(request):
print('hi')
def sum(a, b):
return a + b
# c = sum(2, 3)
# print(c)
def mult(a, b=3, c=10):
return a * b * c
print(mult(2))
# ===== /DocxTest.py (repo: Yzp109062/programming) =====
from docx import Document
from docx.shared import Pt
document = Document("/home/wesley/Desktop/Construction/Conlanging/Daellic/Daool Lexicon Working Version Python Test.docx")
style = document.styles["Normal"]
font = style.font
font.name = "Charis SIL"
font.size = Pt(12)
p = document.add_paragraph("Test ")
p.add_run("bold").bold = True
p.add_run(" and ")
p.add_run("italic").italic = True
p = document.add_paragraph("New paragraph")
document.save("/home/wesley/programming/DocxTestOutput.docx")
# ===== /.history/Code/histogram_20200204122135.py (repo: SamirIngley/CS1.2-Tweet-Gen) =====
from clean_text import clean
from benchmark import bench
def tuple_hist(source):
''' Fastest - tuples are immutable. List of tuples: [('hello', 3), ('what', 4)]
Takes text. Stores each item in text, compares each item to the rest of the words in
text and keeps a running total. Used list account for no repeats.
'''
histo = []
used = []
text = clean(source)
# print(text)
for word in text:
# see if we've used the word before
counter = 0
if word in used:
continue
used.append(word)
print("hello")
for word2 in text:
if word == word2:
counter += 1
instance = (word, counter)
histo.append(instance)
# print(histo)
print('USED: ', used)
return histo
def list_hist(source):
''' List of lists histogram. [['hello', 1], ['you', 3], ['sir', 4]]
Takes text. Stores each item in text, compares each item to the rest of the words in
text and keeps a running total. Used list account for no repeats.
'''
histo = []
used = []
text = clean(source)
# print(text)
for word in text:
counter = 0
if word in used:
continue
used.append(word)
for word2 in text:
if word == word2:
counter += 1
instance = [word, counter]
histo.append(instance)
# print(histo)
return histo
def dict_hist(source):
''' Dictionary key value pairs {'hello':1, 'sir':2, 'how':5}
    Takes text and keeps a running count of each word in a dict; dict keys
    are unique, so no used list is needed.'''
histo_dict = {}
# used = []
text = clean(source)
# print(text)
for word in text:
if word in histo_dict:
histo_dict[word] += 1
else:
histo_dict[word] = 1
# print(histo_dict)
return histo_dict
def counts_list(source):
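    ''' Inverted histogram: list of (count, [words]) tuples that groups words
        by frequency, e.g. [(4, ['fish']), (1, ['one', 'two', 'red', 'blue'])].
    '''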
histo = []
instances = []
used = []
text = clean(source)
# print(text)
for word in text:
# check if the word has already been accounted
if word in used:
continue
counter = 0
used.append(word)
# for each word in the text if it matches a word in the same text,
# we have an instance of that word - so increase counter by 1
for word2 in text:
if word == word2:
counter += 1
# we know the word and we have the occurances stored in counter.
# create a list instance object with the word and its occurances
# and append it to the list of word instances.
instance = [word, counter]
instances.append(instance)
used_nums = []
for item in instances:
# check if the word frequency has been accounted for before
if item[1] in used_nums:
continue
used_nums.append(item[1])
membs = []
new_instance = (item[1], membs) # this is what an instance of our histogram looks like
# for one item in our instances we check if the frequency matches
# any other frequencies in the instances list. if it does we add those to members list
for item2 in instances:
if item[1] == item2[1]:
# print(item2[0])
membs.append(item2[0])
histo.append(new_instance)
# print(histo)
return histo
def unique_words(histo):
''' takes a histogram and returns the number of unique words in it.
'''
counter = 0
for item in histo:
if type(item[0]) == int: # if the first item is an integer
for word in item[1]:
# print(item[1])
counter += 1
else:
# print(item)
counter += 1
# print(counter)
return counter
def frequency(word, histo):
''' takes a word and histo, returns the frequency of that word in the histo
'''
for item in histo:
if word in item:
freq = 0
if type(item[0]) == int: # if the first item is an integer
freq = item[0]
else:
freq = item[1]
# print("{} freq: {}".format(word, freq))
return freq
if __name__ == '__main__':
source = 'one fish two fish red fish blue fish'
listo_histo = list_hist("source.txt")
# print(listo_histo)
tuple_histo = tuple_hist(source)
print(tuple_histo)
# print(dict_hist('source.txt'))
# print(counts_list('source.txt'))
print('')
print(unique_words(list_hist("source.txt")))
print(unique_words(counts_list('source.txt')))
print('freq of fish: ', frequency('fish', list_hist("source.txt")))
print('freq of tax: ', frequency('tax', list_hist("source.txt")))
print('freq of i: ', frequency('i', list_hist("source.txt")))
print('benchmark for list hist: ', bench(listo_histo))
print('benchmark for dict hist: ', bench(dict_hist('source.txt')))
print('benchmark for tuple hist: ', bench(tuple_histo))
# ===== /diplomacy_research/proto/diplomacy_tensorflow/core/protobuf/transport_options_pb2.py (repo: JACKHAHA363/research, license: MIT) =====
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: diplomacy_tensorflow/core/protobuf/transport_options.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='diplomacy_tensorflow/core/protobuf/transport_options.proto',
package='diplomacy.tensorflow',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n:diplomacy_tensorflow/core/protobuf/transport_options.proto\x12\x14\x64iplomacy.tensorflow\"*\n\x10RecvBufRespExtra\x12\x16\n\x0etensor_content\x18\x01 \x03(\x0c\x62\x06proto3')
)
_RECVBUFRESPEXTRA = _descriptor.Descriptor(
name='RecvBufRespExtra',
full_name='diplomacy.tensorflow.RecvBufRespExtra',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tensor_content', full_name='diplomacy.tensorflow.RecvBufRespExtra.tensor_content', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=84,
serialized_end=126,
)
DESCRIPTOR.message_types_by_name['RecvBufRespExtra'] = _RECVBUFRESPEXTRA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RecvBufRespExtra = _reflection.GeneratedProtocolMessageType('RecvBufRespExtra', (_message.Message,), dict(
DESCRIPTOR = _RECVBUFRESPEXTRA,
__module__ = 'diplomacy_tensorflow.core.protobuf.transport_options_pb2'
# @@protoc_insertion_point(class_scope:diplomacy.tensorflow.RecvBufRespExtra)
))
_sym_db.RegisterMessage(RecvBufRespExtra)
# @@protoc_insertion_point(module_scope)
# ===== /apps/trade/models.py (repo: dingmf/TTSX1) =====
from datetime import datetime
from django.db import models
from django.contrib.auth import get_user_model
from goods.models import Goods
User = get_user_model()
# Create your models here.
class ShoppingCart(models.Model):
    """
    Shopping cart
    """
    user = models.ForeignKey(User, verbose_name="User", on_delete=models.DO_NOTHING)
    goods = models.ForeignKey(Goods, verbose_name="Goods", on_delete=models.DO_NOTHING)
    nums = models.IntegerField(default=0, verbose_name="Purchase quantity")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="Added time")
    class Meta:
        verbose_name = 'Shopping cart'
        verbose_name_plural = verbose_name
        unique_together = ("user", "goods")
    def __str__(self):
        return "{}({})".format(self.goods.name, self.nums)
class OrderInfo(models.Model):
    """
    Order
    """
    ORDER_STATUS = (
        ("TRADE_SUCCESS", "Success"),
        ("TRADE_CLOSED", "Closed on timeout"),
        ("WAIT_BUYER_PAY", "Trade created"),
        ("TRADE_FINISHED", "Trade finished"),
        ("paying", "Awaiting payment"),
    )
    user = models.ForeignKey(User, verbose_name="User", on_delete=models.DO_NOTHING)
    order_sn = models.CharField(max_length=30, null=True, blank=True, unique=True, verbose_name="Order number")
    trade_no = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name="Trade number")
    pay_status = models.CharField(choices=ORDER_STATUS, default="paying", max_length=30, verbose_name="Order status")
    post_script = models.CharField(max_length=200, verbose_name="Order message")
    order_mount = models.FloatField(default=0.0, verbose_name="Order amount")
    pay_time = models.DateTimeField(null=True, blank=True, verbose_name="Payment time")
    # Recipient information
    address = models.CharField(max_length=100, default="", verbose_name="Shipping address")
    signer_name = models.CharField(max_length=20, default="", verbose_name="Recipient name")
    singer_mobile = models.CharField(max_length=11, verbose_name="Contact phone")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="Added time")
    class Meta:
        verbose_name = "Order"
        verbose_name_plural = verbose_name
    def __str__(self):
        return str(self.order_sn)
class OrderGoods(models.Model):
    """
    Goods detail of an order
    """
    order = models.ForeignKey(OrderInfo, verbose_name="Order info", related_name="goods", on_delete=models.DO_NOTHING)
    goods = models.ForeignKey(Goods, verbose_name="Goods", on_delete=models.DO_NOTHING)
    goods_num = models.IntegerField(default=0, verbose_name="Goods quantity")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="Added time")
    class Meta:
        verbose_name = "Order goods"
        verbose_name_plural = verbose_name
    def __str__(self):
        return str(self.order.order_sn)
# ===== /aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/CreateOTAStaticUpgradeJobRequest.py (repo: djzqbx001/aliyun-openapi-python-sdk, license: Apache-2.0) =====
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class CreateOTAStaticUpgradeJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'CreateOTAStaticUpgradeJob','iot')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RetryCount(self):
return self.get_query_params().get('RetryCount')
def set_RetryCount(self,RetryCount):
self.add_query_param('RetryCount',RetryCount)
def get_TimeoutInMinutes(self):
return self.get_query_params().get('TimeoutInMinutes')
def set_TimeoutInMinutes(self,TimeoutInMinutes):
self.add_query_param('TimeoutInMinutes',TimeoutInMinutes)
def get_NeedConfirm(self):
return self.get_query_params().get('NeedConfirm')
def set_NeedConfirm(self,NeedConfirm):
self.add_query_param('NeedConfirm',NeedConfirm)
def get_GroupType(self):
return self.get_query_params().get('GroupType')
def set_GroupType(self,GroupType):
self.add_query_param('GroupType',GroupType)
def get_NeedPush(self):
return self.get_query_params().get('NeedPush')
def set_NeedPush(self,NeedPush):
self.add_query_param('NeedPush',NeedPush)
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
def get_DownloadProtocol(self):
return self.get_query_params().get('DownloadProtocol')
def set_DownloadProtocol(self,DownloadProtocol):
self.add_query_param('DownloadProtocol',DownloadProtocol)
def get_TargetSelection(self):
return self.get_query_params().get('TargetSelection')
def set_TargetSelection(self,TargetSelection):
self.add_query_param('TargetSelection',TargetSelection)
def get_ScheduleFinishTime(self):
return self.get_query_params().get('ScheduleFinishTime')
def set_ScheduleFinishTime(self,ScheduleFinishTime):
self.add_query_param('ScheduleFinishTime',ScheduleFinishTime)
def get_Tags(self):
return self.get_query_params().get('Tag')
def set_Tags(self, Tags):
for depth1 in range(len(Tags)):
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
def get_GrayPercent(self):
return self.get_query_params().get('GrayPercent')
def set_GrayPercent(self,GrayPercent):
self.add_query_param('GrayPercent',GrayPercent)
def get_DnListFileUrl(self):
return self.get_query_params().get('DnListFileUrl')
def set_DnListFileUrl(self,DnListFileUrl):
self.add_query_param('DnListFileUrl',DnListFileUrl)
def get_GroupId(self):
return self.get_query_params().get('GroupId')
def set_GroupId(self,GroupId):
self.add_query_param('GroupId',GroupId)
def get_FirmwareId(self):
return self.get_query_params().get('FirmwareId')
def set_FirmwareId(self,FirmwareId):
self.add_query_param('FirmwareId',FirmwareId)
def get_ProductKey(self):
return self.get_query_params().get('ProductKey')
def set_ProductKey(self,ProductKey):
self.add_query_param('ProductKey',ProductKey)
def get_RetryInterval(self):
return self.get_query_params().get('RetryInterval')
def set_RetryInterval(self,RetryInterval):
self.add_query_param('RetryInterval',RetryInterval)
def get_SrcVersions(self):
return self.get_query_params().get('SrcVersion')
def set_SrcVersions(self, SrcVersions):
for depth1 in range(len(SrcVersions)):
if SrcVersions[depth1] is not None:
self.add_query_param('SrcVersion.' + str(depth1 + 1) , SrcVersions[depth1])
def get_ScheduleTime(self):
return self.get_query_params().get('ScheduleTime')
def set_ScheduleTime(self,ScheduleTime):
self.add_query_param('ScheduleTime',ScheduleTime)
def get_OverwriteMode(self):
return self.get_query_params().get('OverwriteMode')
def set_OverwriteMode(self,OverwriteMode):
self.add_query_param('OverwriteMode',OverwriteMode)
def get_MaximumPerMinute(self):
return self.get_query_params().get('MaximumPerMinute')
def set_MaximumPerMinute(self,MaximumPerMinute):
self.add_query_param('MaximumPerMinute',MaximumPerMinute)
def get_TargetDeviceNames(self):
return self.get_query_params().get('TargetDeviceName')
def set_TargetDeviceNames(self, TargetDeviceNames):
for depth1 in range(len(TargetDeviceNames)):
if TargetDeviceNames[depth1] is not None:
                self.add_query_param('TargetDeviceName.' + str(depth1 + 1), TargetDeviceNames[depth1])
# ===== /backend/home/migrations/0001_load_initial_data.py (repo: crowdbotics-apps/dry-glitter-29203) =====
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "dry-glitter-29203.botics.co"
site_params = {
"name": "Dry Glitter",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
a617079a95a7c319e8098fbc2e673c2307de3965 | 028ef598a1da5e865691db12a92a7e894d6a20ed | /server/plugins/item/forms.py | 7309fe19e115890c8f00281a260ba06e230c85c3 | [] | no_license | alvinwan/Puhjiii | e051b39f236525cb8e3cfbdc24fe4bb243708996 | bf633015fe13a7a60cd1137b16b8d2cec6b09d39 | refs/heads/master | 2021-01-22T09:47:55.058092 | 2015-07-10T17:49:43 | 2015-07-10T17:49:43 | 37,454,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from server.forms import DynamicForm
class AddItemForm(DynamicForm):
pass
class EditItemForm(DynamicForm):
    pass
# ===== /Si_and_InGaAs_detectors/Calibration_2Mar_2015/IGA22030TC_caldata_for_python/run_DSS-IGA22030TC_cal_data.py (repo: vfurtula/Alle-projekter) =====
## Import libraries
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial import polynomial as P
from scipy import interpolate
## For Matplotlib fonts
from matplotlib import rc
## for LaTeX style use:
rc("text", usetex=True)
rc("font", family="serif")
######################################################
# Create folder structure for input and output data  #
######################################################
class Get_DSS_IGA22030TC_new:
def __init__(self,my_string):
        # Open new datafile from SOURCE 2 (OLIS)
#my_string='Kalibrering_DSS-S025TC.txt'
#step=5
f2 = open(my_string, 'r')
# Read and ignore header lines
headers = [f2.readline() for i in range(7)]
self.all_data=[]
# Read new datafile
for line in f2:
line = line.strip()
columns = line.split()
self.all_data.extend([ float(columns[0]) ])
f2.close()
self.wl_indx=[]
self.res_m30C_indx=[]
self.err_m30C_indx=[]
self.res_p23C_indx=[]
self.err_p23C_indx=[]
self.wl=[]
self.res_m30C=[]
self.err_m30C=[]
self.res_p23C=[]
self.err_p23C=[]
def getdata(self):
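        # The raw file is one value per line in repeating groups of five:
        # wavelength, responsivity @ -30C, its error, responsivity @ +23C,
        # and its error; the index lists below pick each column out.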
for i in range(len(self.all_data)):
if i*5<len(self.all_data):
self.wl_indx.extend([ i*5 ])
if 1+i*5<len(self.all_data):
self.res_m30C_indx.extend([ 1+i*5 ])
if 2+i*5<len(self.all_data):
self.err_m30C_indx.extend([ 2+i*5 ])
if 3+i*5<len(self.all_data):
self.res_p23C_indx.extend([ 3+i*5 ])
if 4+i*5<len(self.all_data):
self.err_p23C_indx.extend([ 4+i*5 ])
for i in range(len(self.wl_indx)):
self.wl.extend([ self.all_data[self.wl_indx[i]] ])
self.res_m30C.extend([ self.all_data[self.res_m30C_indx[i]] ])
self.err_m30C.extend([ self.all_data[self.err_m30C_indx[i]] ])
self.res_p23C.extend([ self.all_data[self.res_p23C_indx[i]] ])
self.err_p23C.extend([ self.all_data[self.err_p23C_indx[i]] ])
return self.wl, self.res_m30C, self.err_m30C, self.res_p23C, self.err_p23C
class Get_DSS_IGA22030TC_old:
def __init__(self,my_string):
        # Open new datafile from SOURCE 2 (OLIS)
#my_string='Kalibrering_DSS-S025TC_HORIBA.txt'
f2 = open(my_string, 'r')
# Read and ignore header lines
headers = [f2.readline() for i in range(3)]
self.wl=[]
self.res_m30C=[]
self.res_p23C=[]
# Read new datafile
for line in f2:
line = line.strip()
columns = line.split()
self.wl.extend([ float(columns[0]) ])
self.res_m30C.extend([ float(columns[2]) ])
self.res_p23C.extend([ float(columns[1]) ])
f2.close()
def getdata(self):
return self.wl, self.res_m30C, self.res_p23C
if __name__ == "__main__":
out_data1=Get_DSS_IGA22030TC_new('Kalibrering_DSS-IGA22030TC.txt').getdata()
out_data2=Get_DSS_IGA22030TC_old('Kalibrering_DSS-IGA22030TC_HORIBA.txt').getdata()
out_data3=Get_DSS_IGA22030TC_new('Kalibrering_DSS-IGA22030TC_crosscheck.txt').getdata()
'''
coef_first = P.polyfit(true_val,Ard_baud_23040,1)
#print "polyfit coef = ", coef
a1=coef_first[1]
b1=coef_first[0]
val_first = [a1*i+b1 for i in true_val]
coef_second = P.polyfit(true_val,Ard_baud_23040,2)
#print "polyfit coef = ", coef
a2=coef_second[2]
b2=coef_second[1]
c2=coef_second[0]
val_second = [a2*i**2+b2*i+c2 for i in true_val]
delta1=[]
delta2=[]
for i in range(len(true_val)):
if i==0:
delta1.extend([ 1 ])
delta2.extend([ 1 ])
else:
delta1.extend([ Ard_baud_23040[i]/true_val[i] ])
delta2.extend([ Ard_baud_230400[i]/true_val[i] ])
f2.close()
'''
# Plot the results
plt.figure(1, figsize=(18,12))
plt.plot(out_data1[0],out_data1[1],'b-',label="-30C, JV, 5 nm step (supply 041004-1)")
up_err_m30=[out_data1[1][i]*(1+out_data1[2][i]/100) for i in range(len(out_data1[1]))]
do_err_m30=[out_data1[1][i]*(1-out_data1[2][i]/100) for i in range(len(out_data1[1]))]
plt.fill_between(out_data1[0], up_err_m30, do_err_m30, facecolor='blue', alpha=0.3)
plt.plot(out_data1[0],out_data1[3],'r-',label="+23C, JV, 5 nm step (supply 041004-1)")
up_err_p23=[out_data1[3][i]*(1+out_data1[4][i]/100) for i in range(len(out_data1[3]))]
do_err_p23=[out_data1[3][i]*(1-out_data1[4][i]/100) for i in range(len(out_data1[3]))]
plt.fill_between(out_data1[0], up_err_p23, do_err_p23, facecolor='red', alpha=0.3)
plt.plot(out_data2[0],out_data2[1],'b--',label="-30C, HORIBA, Oct 2010 (supply unknown)")
plt.plot(out_data2[0],out_data2[2],'r--',label="+23C, HORIBA, Oct 2010 (supply unknown)")
###
plt.plot(out_data3[0],out_data3[1],'bx-',label="-30C, JV, 100 nm step (supply 031113-2)")
#up_err_m30=[out_data3[1][i]*(1+out_data3[2][i]/100) for i in range(len(out_data3[1]))]
#do_err_m30=[out_data3[1][i]*(1-out_data3[2][i]/100) for i in range(len(out_data3[1]))]
#plt.fill_between(out_data3[0], up_err_m30, do_err_m30, facecolor='yellow', alpha=0.3)
plt.plot(out_data3[0],out_data3[3],'rx-',label="+23C, JV, 100 nm step (supply 031113-2)")
#up_err_p23=[out_data3[3][i]*(1+out_data3[4][i]/100) for i in range(len(out_data3[3]))]
#do_err_p23=[out_data3[3][i]*(1-out_data3[4][i]/100) for i in range(len(out_data3[3]))]
#plt.fill_between(out_data3[0], up_err_p23, do_err_p23, facecolor='green', alpha=0.3)
plt.xlabel("Wavelength [nm]", fontsize=20)
plt.ylabel("Responsivity [A/W]", fontsize=20)
plt.tick_params(axis="both", labelsize=20)
plt.title('Calibration of DSS-IGA22030TC (Serial No. 021147) at Justervesenet (JV), Jan 2015')
#plt.yticks( np.linspace(0,1,11) )
#plt.xticks( np.linspace(0,11000,12) )
#plt.ylim([0,1])
#plt.xlim([0,11000])
l=plt.legend(loc=2, fontsize=15)
l.draw_frame(False)
plt.savefig('DSS-IGA22030TC_calplots.pdf')
plt.show()
# ===== /standard_library/Concurrency/Subprocess/subprocess_popen.py (repo: hjlarry/practise-py) =====
import subprocess
import io
print("一、 与进程单向通信")
print("read:")
proc = subprocess.Popen(["echo", "to stdout"], stdout=subprocess.PIPE)
# proc = subprocess.Popen(["ls", "-l"], stdout=subprocess.PIPE)
value = proc.communicate()
print(value)
stdout_value = value[0].decode("utf-8")
print(stdout_value)
print("write:")
proc = subprocess.Popen(["cat", "-"], stdin=subprocess.PIPE)
proc.communicate("stdin:sth".encode("utf-8"))
print()
print()
print("二、 与进程双向通信:")
proc = subprocess.Popen(
'cat -; echo "to stderr" 1>&2',
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
msg = "through stdin to stdout".encode("utf-8")
stdout_value, stderr_value = proc.communicate(msg)
print(stdout_value)
print(stderr_value)
print()
print("三、 管道连接:")
# 相当于 $cat signal.ipynb | grep "def" | cut -b -30
cat = subprocess.Popen(["cat", "subprocess.ipynb"], stdout=subprocess.PIPE)
grep = subprocess.Popen(["grep", "def"], stdin=cat.stdout, stdout=subprocess.PIPE)
cut = subprocess.Popen(["cut", "-b", "-30"], stdin=grep.stdout, stdout=subprocess.PIPE)
for line in cut.stdout:
print(line)
print()
print("四、 与另一个命令行去交互:")
print("one line at a time:")
proc = subprocess.Popen(
"python3 repeater.py", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
stdin = io.TextIOWrapper(proc.stdin, encoding="utf-8", line_buffering=True)
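# line_buffering=True makes the wrapper flush after every newline, so the
# child process sees each line as soon as it is written.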
stdout = io.TextIOWrapper(proc.stdout, encoding="utf-8")
for i in range(5):
line = f"{i} \n"
stdin.write(line)
output = stdout.readline()
print(output)
remainder = proc.communicate()[0].decode("utf-8")
print(remainder)
print()
print("All line at a time:")
proc = subprocess.Popen(
"python3 repeater.py", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
stdin = io.TextIOWrapper(proc.stdin, encoding="utf-8")
stdout = io.TextIOWrapper(proc.stdout, encoding="utf-8")
for i in range(5):
line = f"{i} \n"
stdin.write(line)
stdin.flush()
remainder = proc.communicate()[0].decode("utf-8")
print(remainder)
# ===== /downstream/mmsegmentation/mmseg/datasets/pipelines/transforms.py (repo: ChenhongyiYang/GPViT, license: Apache-2.0) =====
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import mmcv
import numpy as np
from mmcv.utils import deprecated_api_warning, is_tuple_of
from numpy import random
from ..builder import PIPELINES
@PIPELINES.register_module()
class AlignedResize(object):
"""Resize images & seg. Align
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None,
keep_ratio=True,
size_divisor=32):
if img_scale is None:
self.img_scale = None
else:
if isinstance(img_scale, list):
self.img_scale = img_scale
else:
self.img_scale = [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple)
if ratio_range is not None:
# mode 1: given img_scale=None and a range of image ratio
# mode 2: given a scale and a range of image ratio
assert self.img_scale is None or len(self.img_scale) == 1
else:
# mode 3 and 4: given multiple scales or a range of scales
assert multiscale_mode in ['value', 'range']
self.multiscale_mode = multiscale_mode
self.ratio_range = ratio_range
self.keep_ratio = keep_ratio
self.size_divisor = size_divisor
@staticmethod
def random_select(img_scales):
"""Randomly select an img_scale from given candidates.
Args:
img_scales (list[tuple]): Images scales for selection.
Returns:
(tuple, int): Returns a tuple ``(img_scale, scale_dix)``,
where ``img_scale`` is the selected image scale and
``scale_idx`` is the selected index in the given candidates.
"""
assert mmcv.is_list_of(img_scales, tuple)
scale_idx = np.random.randint(len(img_scales))
img_scale = img_scales[scale_idx]
return img_scale, scale_idx
@staticmethod
def random_sample(img_scales):
"""Randomly sample an img_scale when ``multiscale_mode=='range'``.
Args:
img_scales (list[tuple]): Images scale range for sampling.
There must be two tuples in img_scales, which specify the lower
                and upper bound of image scales.
Returns:
(tuple, None): Returns a tuple ``(img_scale, None)``, where
``img_scale`` is sampled scale and None is just a placeholder
to be consistent with :func:`random_select`.
"""
assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale, None
@staticmethod
def random_sample_ratio(img_scale, ratio_range):
"""Randomly sample an img_scale when ``ratio_range`` is specified.
A ratio will be randomly sampled from the range specified by
``ratio_range``. Then it would be multiplied with ``img_scale`` to
generate sampled scale.
Args:
img_scale (tuple): Images scale base to multiply with ratio.
ratio_range (tuple[float]): The minimum and maximum ratio to scale
the ``img_scale``.
Returns:
(tuple, None): Returns a tuple ``(scale, None)``, where
``scale`` is sampled ratio multiplied with ``img_scale`` and
None is just a placeholder to be consistent with
:func:`random_select`.
"""
assert isinstance(img_scale, tuple) and len(img_scale) == 2
min_ratio, max_ratio = ratio_range
assert min_ratio <= max_ratio
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
return scale, None
def _random_scale(self, results):
"""Randomly sample an img_scale according to ``ratio_range`` and
``multiscale_mode``.
If ``ratio_range`` is specified, a ratio will be sampled and be
multiplied with ``img_scale``.
If multiple scales are specified by ``img_scale``, a scale will be
sampled according to ``multiscale_mode``.
Otherwise, single scale will be used.
Args:
results (dict): Result dict from :obj:`dataset`.
Returns:
dict: Two new keys 'scale` and 'scale_idx` are added into
``results``, which would be used by subsequent pipelines.
"""
if self.ratio_range is not None:
if self.img_scale is None:
h, w = results['img'].shape[:2]
scale, scale_idx = self.random_sample_ratio((w, h),
self.ratio_range)
else:
scale, scale_idx = self.random_sample_ratio(
self.img_scale[0], self.ratio_range)
elif len(self.img_scale) == 1:
scale, scale_idx = self.img_scale[0], 0
elif self.multiscale_mode == 'range':
scale, scale_idx = self.random_sample(self.img_scale)
elif self.multiscale_mode == 'value':
scale, scale_idx = self.random_select(self.img_scale)
else:
raise NotImplementedError
results['scale'] = scale
results['scale_idx'] = scale_idx
def _align(self, img, size_divisor, interpolation=None):
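        # Resize (rather than pad) so both spatial dims become exact
        # multiples of size_divisor.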
align_h = int(np.ceil(img.shape[0] / size_divisor)) * size_divisor
align_w = int(np.ceil(img.shape[1] / size_divisor)) * size_divisor
if interpolation == None:
img = mmcv.imresize(img, (align_w, align_h))
else:
img = mmcv.imresize(img, (align_w, align_h), interpolation=interpolation)
return img
def _resize_img(self, results):
"""Resize images with ``results['scale']``."""
if self.keep_ratio:
img, scale_factor = mmcv.imrescale(
results['img'], results['scale'], return_scale=True)
#### align ####
img = self._align(img, self.size_divisor)
# the w_scale and h_scale has minor difference
# a real fix should be done in the mmcv.imrescale in the future
new_h, new_w = img.shape[:2]
h, w = results['img'].shape[:2]
w_scale = new_w / w
h_scale = new_h / h
else:
img, w_scale, h_scale = mmcv.imresize(
results['img'], results['scale'], return_scale=True)
h, w = img.shape[:2]
assert int(np.ceil(h / self.size_divisor)) * self.size_divisor == h and \
int(np.ceil(w / self.size_divisor)) * self.size_divisor == w, \
"img size not align. h:{} w:{}".format(h,w)
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
results['img'] = img
results['img_shape'] = img.shape
results['pad_shape'] = img.shape # in case that there is no padding
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def _resize_seg(self, results):
"""Resize semantic segmentation map with ``results['scale']``."""
for key in results.get('seg_fields', []):
if self.keep_ratio:
gt_seg = mmcv.imrescale(
results[key], results['scale'], interpolation='nearest')
gt_seg = self._align(gt_seg, self.size_divisor, interpolation='nearest')
else:
gt_seg = mmcv.imresize(
results[key], results['scale'], interpolation='nearest')
h, w = gt_seg.shape[:2]
assert int(np.ceil(h / self.size_divisor)) * self.size_divisor == h and \
int(np.ceil(w / self.size_divisor)) * self.size_divisor == w, \
"gt_seg size not align. h:{} w:{}".format(h, w)
results[key] = gt_seg
def __call__(self, results):
"""Call function to resize images, bounding boxes, masks, semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
'keep_ratio' keys are added into result dict.
"""
if 'scale' not in results:
self._random_scale(results)
self._resize_img(results)
self._resize_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(img_scale={self.img_scale}, '
f'multiscale_mode={self.multiscale_mode}, '
f'ratio_range={self.ratio_range}, '
f'keep_ratio={self.keep_ratio})')
return repr_str
@PIPELINES.register_module()
class ResizeToMultiple(object):
"""Resize images & seg to multiple of divisor.
Args:
size_divisor (int): images and gt seg maps need to resize to multiple
of size_divisor. Default: 32.
interpolation (str, optional): The interpolation mode of image resize.
Default: None
"""
def __init__(self, size_divisor=32, interpolation=None):
self.size_divisor = size_divisor
self.interpolation = interpolation
def __call__(self, results):
"""Call function to resize images, semantic segmentation map to
multiple of size divisor.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img_shape', 'pad_shape' keys are updated.
"""
# Align image to multiple of size divisor.
img = results['img']
img = mmcv.imresize_to_multiple(
img,
self.size_divisor,
scale_factor=1,
interpolation=self.interpolation
if self.interpolation else 'bilinear')
results['img'] = img
results['img_shape'] = img.shape
results['pad_shape'] = img.shape
# Align segmentation map to multiple of size divisor.
for key in results.get('seg_fields', []):
gt_seg = results[key]
gt_seg = mmcv.imresize_to_multiple(
gt_seg,
self.size_divisor,
scale_factor=1,
interpolation='nearest')
results[key] = gt_seg
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(size_divisor={self.size_divisor}, '
f'interpolation={self.interpolation})')
return repr_str
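# Illustrative test-pipeline entry (not part of the original file): align both
# the image and the GT maps to a multiple of 32 without padding, as required
# by window/patch-based backbones:
#
#   dict(type='ResizeToMultiple', size_divisor=32)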
@PIPELINES.register_module()
class Resize(object):
"""Resize images & seg.
This transform resizes the input image to some scale. If the input dict
contains the key "scale", then the scale in the input dict is used,
otherwise the specified scale in the init method is used.
``img_scale`` can be None, a tuple (single-scale) or a list of tuple
(multi-scale). There are 4 multiscale modes:
- ``ratio_range is not None``:
1. When img_scale is None, img_scale is the shape of image in results
(img_scale = results['img'].shape[:2]) and the image is resized based
on the original size. (mode 1)
2. When img_scale is a tuple (single-scale), randomly sample a ratio from
the ratio range and multiply it with the image scale. (mode 2)
- ``ratio_range is None and multiscale_mode == "range"``: randomly sample a
scale from the a range. (mode 3)
- ``ratio_range is None and multiscale_mode == "value"``: randomly sample a
scale from multiple scales. (mode 4)
Args:
        img_scale (tuple or list[tuple]): Image scales for resizing.
            Default: None.
multiscale_mode (str): Either "range" or "value".
Default: 'range'
ratio_range (tuple[float]): (min_ratio, max_ratio).
Default: None
keep_ratio (bool): Whether to keep the aspect ratio when resizing the
image. Default: True
min_size (int, optional): The minimum size for input and the shape
of the image and seg map will not be less than ``min_size``.
As the shape of model input is fixed like 'SETR' and 'BEiT'.
Following the setting in these models, resized images must be
bigger than the crop size in ``slide_inference``. Default: None
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None,
keep_ratio=True,
min_size=None):
if img_scale is None:
self.img_scale = None
else:
if isinstance(img_scale, list):
self.img_scale = img_scale
else:
self.img_scale = [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple)
if ratio_range is not None:
# mode 1: given img_scale=None and a range of image ratio
# mode 2: given a scale and a range of image ratio
assert self.img_scale is None or len(self.img_scale) == 1
else:
# mode 3 and 4: given multiple scales or a range of scales
assert multiscale_mode in ['value', 'range']
self.multiscale_mode = multiscale_mode
self.ratio_range = ratio_range
self.keep_ratio = keep_ratio
self.min_size = min_size
@staticmethod
def random_select(img_scales):
"""Randomly select an img_scale from given candidates.
Args:
            img_scales (list[tuple]): Image scales for selection.
Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``,
where ``img_scale`` is the selected image scale and
``scale_idx`` is the selected index in the given candidates.
"""
assert mmcv.is_list_of(img_scales, tuple)
scale_idx = np.random.randint(len(img_scales))
img_scale = img_scales[scale_idx]
return img_scale, scale_idx
@staticmethod
def random_sample(img_scales):
"""Randomly sample an img_scale when ``multiscale_mode=='range'``.
Args:
            img_scales (list[tuple]): Image scale range for sampling.
There must be two tuples in img_scales, which specify the lower
and upper bound of image scales.
Returns:
(tuple, None): Returns a tuple ``(img_scale, None)``, where
``img_scale`` is sampled scale and None is just a placeholder
to be consistent with :func:`random_select`.
"""
assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale, None
@staticmethod
def random_sample_ratio(img_scale, ratio_range):
"""Randomly sample an img_scale when ``ratio_range`` is specified.
A ratio will be randomly sampled from the range specified by
``ratio_range``. Then it would be multiplied with ``img_scale`` to
generate sampled scale.
Args:
            img_scale (tuple): Image scale base to multiply with the ratio.
ratio_range (tuple[float]): The minimum and maximum ratio to scale
the ``img_scale``.
Returns:
(tuple, None): Returns a tuple ``(scale, None)``, where
``scale`` is sampled ratio multiplied with ``img_scale`` and
None is just a placeholder to be consistent with
:func:`random_select`.
"""
assert isinstance(img_scale, tuple) and len(img_scale) == 2
min_ratio, max_ratio = ratio_range
assert min_ratio <= max_ratio
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
return scale, None
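    # Worked example (illustrative): with img_scale=(2048, 512) and a sampled
    # ratio of 0.75, random_sample_ratio returns
    # (int(2048 * 0.75), int(512 * 0.75)) = (1536, 384).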
def _random_scale(self, results):
"""Randomly sample an img_scale according to ``ratio_range`` and
``multiscale_mode``.
If ``ratio_range`` is specified, a ratio will be sampled and be
multiplied with ``img_scale``.
If multiple scales are specified by ``img_scale``, a scale will be
sampled according to ``multiscale_mode``.
Otherwise, single scale will be used.
Args:
results (dict): Result dict from :obj:`dataset`.
Returns:
            dict: Two new keys 'scale' and 'scale_idx' are added into
``results``, which would be used by subsequent pipelines.
"""
if self.ratio_range is not None:
if self.img_scale is None:
h, w = results['img'].shape[:2]
scale, scale_idx = self.random_sample_ratio((w, h),
self.ratio_range)
else:
scale, scale_idx = self.random_sample_ratio(
self.img_scale[0], self.ratio_range)
elif len(self.img_scale) == 1:
scale, scale_idx = self.img_scale[0], 0
elif self.multiscale_mode == 'range':
scale, scale_idx = self.random_sample(self.img_scale)
elif self.multiscale_mode == 'value':
scale, scale_idx = self.random_select(self.img_scale)
else:
raise NotImplementedError
results['scale'] = scale
results['scale_idx'] = scale_idx
def _resize_img(self, results):
"""Resize images with ``results['scale']``."""
if self.keep_ratio:
if self.min_size is not None:
# TODO: Now 'min_size' is an 'int' which means the minimum
# shape of images is (min_size, min_size, 3). 'min_size'
# with tuple type will be supported, i.e. the width and
# height are not equal.
if min(results['scale']) < self.min_size:
new_short = self.min_size
else:
new_short = min(results['scale'])
h, w = results['img'].shape[:2]
if h > w:
new_h, new_w = new_short * h / w, new_short
else:
new_h, new_w = new_short, new_short * w / h
results['scale'] = (new_h, new_w)
img, scale_factor = mmcv.imrescale(
results['img'], results['scale'], return_scale=True)
            # the w_scale and h_scale can have a minor difference
            # a real fix should be done in mmcv.imrescale in the future
new_h, new_w = img.shape[:2]
h, w = results['img'].shape[:2]
w_scale = new_w / w
h_scale = new_h / h
else:
img, w_scale, h_scale = mmcv.imresize(
results['img'], results['scale'], return_scale=True)
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
results['img'] = img
results['img_shape'] = img.shape
results['pad_shape'] = img.shape # in case that there is no padding
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def _resize_seg(self, results):
"""Resize semantic segmentation map with ``results['scale']``."""
for key in results.get('seg_fields', []):
if self.keep_ratio:
gt_seg = mmcv.imrescale(
results[key], results['scale'], interpolation='nearest')
else:
gt_seg = mmcv.imresize(
results[key], results['scale'], interpolation='nearest')
results[key] = gt_seg
def __call__(self, results):
"""Call function to resize images, bounding boxes, masks, semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
'keep_ratio' keys are added into result dict.
"""
if 'scale' not in results:
self._random_scale(results)
self._resize_img(results)
self._resize_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(img_scale={self.img_scale}, '
f'multiscale_mode={self.multiscale_mode}, '
f'ratio_range={self.ratio_range}, '
f'keep_ratio={self.keep_ratio})')
return repr_str
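# Illustrative train-pipeline entry (values follow common Cityscapes-style
# configs, not taken from this file): sample a ratio in [0.5, 2.0], multiply
# it with the base scale, then resize keeping the aspect ratio:
#
#   dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0),
#        keep_ratio=True)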
@PIPELINES.register_module()
class RandomFlip(object):
"""Flip the image & seg.
If the input dict contains the key "flip", then the flag will be used,
otherwise it will be randomly decided by a ratio specified in the init
method.
Args:
prob (float, optional): The flipping probability. Default: None.
direction(str, optional): The flipping direction. Options are
'horizontal' and 'vertical'. Default: 'horizontal'.
"""
@deprecated_api_warning({'flip_ratio': 'prob'}, cls_name='RandomFlip')
def __init__(self, prob=None, direction='horizontal'):
self.prob = prob
self.direction = direction
if prob is not None:
            assert 0 <= prob <= 1
assert direction in ['horizontal', 'vertical']
def __call__(self, results):
"""Call function to flip bounding boxes, masks, semantic segmentation
maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Flipped results, 'flip', 'flip_direction' keys are added into
result dict.
"""
if 'flip' not in results:
            flip = bool(np.random.rand() < self.prob)
results['flip'] = flip
if 'flip_direction' not in results:
results['flip_direction'] = self.direction
if results['flip']:
# flip image
results['img'] = mmcv.imflip(
results['img'], direction=results['flip_direction'])
# flip segs
for key in results.get('seg_fields', []):
# use copy() to make numpy stride positive
results[key] = mmcv.imflip(
results[key], direction=results['flip_direction']).copy()
return results
def __repr__(self):
return self.__class__.__name__ + f'(prob={self.prob})'
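# Illustrative pipeline entry (not part of the original file):
#
#   dict(type='RandomFlip', prob=0.5, direction='horizontal')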
@PIPELINES.register_module()
class Pad(object):
"""Pad the image & mask.
There are two padding modes: (1) pad to a fixed size and (2) pad to the
minimum size that is divisible by some number.
Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
Args:
size (tuple, optional): Fixed padding size.
size_divisor (int, optional): The divisor of padded size.
pad_val (float, optional): Padding value. Default: 0.
seg_pad_val (float, optional): Padding value of segmentation map.
Default: 255.
"""
def __init__(self,
size=None,
size_divisor=None,
pad_val=0,
seg_pad_val=255):
self.size = size
self.size_divisor = size_divisor
self.pad_val = pad_val
self.seg_pad_val = seg_pad_val
# only one of size and size_divisor should be valid
assert size is not None or size_divisor is not None
assert size is None or size_divisor is None
def _pad_img(self, results):
"""Pad images according to ``self.size``."""
if self.size is not None:
padded_img = mmcv.impad(
results['img'], shape=self.size, pad_val=self.pad_val)
elif self.size_divisor is not None:
padded_img = mmcv.impad_to_multiple(
results['img'], self.size_divisor, pad_val=self.pad_val)
results['img'] = padded_img
results['pad_shape'] = padded_img.shape
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
def _pad_seg(self, results):
"""Pad masks according to ``results['pad_shape']``."""
for key in results.get('seg_fields', []):
results[key] = mmcv.impad(
results[key],
shape=results['pad_shape'][:2],
pad_val=self.seg_pad_val)
def __call__(self, results):
"""Call function to pad images, masks, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Updated result dict.
"""
self._pad_img(results)
self._pad_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(size={self.size}, size_divisor={self.size_divisor}, ' \
f'pad_val={self.pad_val})'
return repr_str
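# Illustrative pipeline entry (not part of the original file): pad every
# sample to a fixed crop size, using 255 (the usual ignore index) for the
# segmentation map:
#
#   dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=255)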
@PIPELINES.register_module()
class Normalize(object):
"""Normalize the image.
Added key is "img_norm_cfg".
Args:
mean (sequence): Mean values of 3 channels.
std (sequence): Std values of 3 channels.
to_rgb (bool): Whether to convert the image from BGR to RGB,
default is true.
"""
def __init__(self, mean, std, to_rgb=True):
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
def __call__(self, results):
"""Call function to normalize images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Normalized results, 'img_norm_cfg' key is added into
result dict.
"""
results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std,
self.to_rgb)
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_rgb=self.to_rgb)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(mean={self.mean}, std={self.std}, to_rgb=' \
f'{self.to_rgb})'
return repr_str
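# Illustrative pipeline entry (the mean/std values are the widely used
# ImageNet statistics, not values taken from this file):
#
#   dict(type='Normalize',
#        mean=[123.675, 116.28, 103.53],
#        std=[58.395, 57.12, 57.375],
#        to_rgb=True)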
@PIPELINES.register_module()
class Rerange(object):
"""Rerange the image pixel value.
Args:
min_value (float or int): Minimum value of the reranged image.
Default: 0.
max_value (float or int): Maximum value of the reranged image.
Default: 255.
"""
def __init__(self, min_value=0, max_value=255):
        assert isinstance(min_value, (float, int))
        assert isinstance(max_value, (float, int))
assert min_value < max_value
self.min_value = min_value
self.max_value = max_value
def __call__(self, results):
"""Call function to rerange images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Reranged results.
"""
img = results['img']
img_min_value = np.min(img)
img_max_value = np.max(img)
assert img_min_value < img_max_value
# rerange to [0, 1]
img = (img - img_min_value) / (img_max_value - img_min_value)
# rerange to [min_value, max_value]
img = img * (self.max_value - self.min_value) + self.min_value
results['img'] = img
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(min_value={self.min_value}, max_value={self.max_value})'
return repr_str
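# Worked example (illustrative): for an image with min=10 and max=210 and the
# default target range [0, 255], a pixel of value 110 becomes
# (110 - 10) / (210 - 10) = 0.5, then 0.5 * 255 = 127.5 -- note the output
# image is float-valued.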
@PIPELINES.register_module()
class CLAHE(object):
"""Use CLAHE method to process the image.
See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J].
Graphics Gems, 1994:474-485.` for more information.
Args:
clip_limit (float): Threshold for contrast limiting. Default: 40.0.
tile_grid_size (tuple[int]): Size of grid for histogram equalization.
Input image will be divided into equally sized rectangular tiles.
It defines the number of tiles in row and column. Default: (8, 8).
"""
def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)):
assert isinstance(clip_limit, (float, int))
self.clip_limit = clip_limit
assert is_tuple_of(tile_grid_size, int)
assert len(tile_grid_size) == 2
self.tile_grid_size = tile_grid_size
def __call__(self, results):
"""Call function to Use CLAHE method process images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Processed results.
"""
for i in range(results['img'].shape[2]):
results['img'][:, :, i] = mmcv.clahe(
np.array(results['img'][:, :, i], dtype=np.uint8),
self.clip_limit, self.tile_grid_size)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(clip_limit={self.clip_limit}, '\
f'tile_grid_size={self.tile_grid_size})'
return repr_str
@PIPELINES.register_module()
class RandomCrop(object):
"""Random crop the image & seg.
Args:
crop_size (tuple): Expected size after cropping, (h, w).
cat_max_ratio (float): The maximum ratio that single category could
occupy.
"""
def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255):
assert crop_size[0] > 0 and crop_size[1] > 0
self.crop_size = crop_size
self.cat_max_ratio = cat_max_ratio
self.ignore_index = ignore_index
def get_crop_bbox(self, img):
"""Randomly get a crop bounding box."""
margin_h = max(img.shape[0] - self.crop_size[0], 0)
margin_w = max(img.shape[1] - self.crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
return crop_y1, crop_y2, crop_x1, crop_x2
def crop(self, img, crop_bbox):
"""Crop from ``img``"""
crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
return img
def __call__(self, results):
"""Call function to randomly crop images, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
img = results['img']
crop_bbox = self.get_crop_bbox(img)
if self.cat_max_ratio < 1.:
# Repeat 10 times
for _ in range(10):
seg_temp = self.crop(results['gt_semantic_seg'], crop_bbox)
labels, cnt = np.unique(seg_temp, return_counts=True)
cnt = cnt[labels != self.ignore_index]
if len(cnt) > 1 and np.max(cnt) / np.sum(
cnt) < self.cat_max_ratio:
break
crop_bbox = self.get_crop_bbox(img)
# crop the image
img = self.crop(img, crop_bbox)
img_shape = img.shape
results['img'] = img
results['img_shape'] = img_shape
# crop semantic seg
for key in results.get('seg_fields', []):
results[key] = self.crop(results[key], crop_bbox)
return results
def __repr__(self):
return self.__class__.__name__ + f'(crop_size={self.crop_size})'
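# Illustrative train-pipeline entry (values follow common Cityscapes-style
# configs): crop to 512x1024 and re-sample the crop (up to 10 times) if a
# single category would cover more than 75% of it:
#
#   dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75)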
@PIPELINES.register_module()
class RandomRotate(object):
"""Rotate the image & seg.
Args:
prob (float): The rotation probability.
degree (float, tuple[float]): Range of degrees to select from. If
degree is a number instead of tuple like (min, max),
the range of degree will be (``-degree``, ``+degree``)
pad_val (float, optional): Padding value of image. Default: 0.
seg_pad_val (float, optional): Padding value of segmentation map.
Default: 255.
center (tuple[float], optional): Center point (w, h) of the rotation in
the source image. If not specified, the center of the image will be
used. Default: None.
auto_bound (bool): Whether to adjust the image size to cover the whole
rotated image. Default: False
"""
def __init__(self,
prob,
degree,
pad_val=0,
seg_pad_val=255,
center=None,
auto_bound=False):
self.prob = prob
        assert 0 <= prob <= 1
if isinstance(degree, (float, int)):
assert degree > 0, f'degree {degree} should be positive'
self.degree = (-degree, degree)
else:
self.degree = degree
assert len(self.degree) == 2, f'degree {self.degree} should be a ' \
f'tuple of (min, max)'
        self.pad_val = pad_val
self.seg_pad_val = seg_pad_val
self.center = center
self.auto_bound = auto_bound
def __call__(self, results):
"""Call function to rotate image, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Rotated results.
"""
        rotate = bool(np.random.rand() < self.prob)
degree = np.random.uniform(min(*self.degree), max(*self.degree))
if rotate:
# rotate image
results['img'] = mmcv.imrotate(
results['img'],
angle=degree,
                border_value=self.pad_val,
center=self.center,
auto_bound=self.auto_bound)
# rotate segs
for key in results.get('seg_fields', []):
results[key] = mmcv.imrotate(
results[key],
angle=degree,
border_value=self.seg_pad_val,
center=self.center,
auto_bound=self.auto_bound,
interpolation='nearest')
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob}, ' \
f'degree={self.degree}, ' \
                    f'pad_val={self.pad_val}, ' \
f'seg_pad_val={self.seg_pad_val}, ' \
f'center={self.center}, ' \
f'auto_bound={self.auto_bound})'
return repr_str
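# Illustrative pipeline entry (not part of the original file): with
# degree=20, half of the samples are rotated by an angle drawn uniformly
# from (-20, 20):
#
#   dict(type='RandomRotate', prob=0.5, degree=20)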
@PIPELINES.register_module()
class RGB2Gray(object):
"""Convert RGB image to grayscale image.
This transform calculate the weighted mean of input image channels with
``weights`` and then expand the channels to ``out_channels``. When
``out_channels`` is None, the number of output channels is the same as
input channels.
Args:
out_channels (int): Expected number of output channels after
transforming. Default: None.
weights (tuple[float]): The weights to calculate the weighted mean.
Default: (0.299, 0.587, 0.114).
"""
def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)):
assert out_channels is None or out_channels > 0
self.out_channels = out_channels
assert isinstance(weights, tuple)
for item in weights:
assert isinstance(item, (float, int))
self.weights = weights
def __call__(self, results):
"""Call function to convert RGB image to grayscale image.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with grayscale image.
"""
img = results['img']
assert len(img.shape) == 3
assert img.shape[2] == len(self.weights)
weights = np.array(self.weights).reshape((1, 1, -1))
img = (img * weights).sum(2, keepdims=True)
if self.out_channels is None:
img = img.repeat(weights.shape[2], axis=2)
else:
img = img.repeat(self.out_channels, axis=2)
results['img'] = img
results['img_shape'] = img.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(out_channels={self.out_channels}, ' \
f'weights={self.weights})'
return repr_str
@PIPELINES.register_module()
class AdjustGamma(object):
"""Using gamma correction to process the image.
Args:
gamma (float or int): Gamma value used in gamma correction.
Default: 1.0.
"""
def __init__(self, gamma=1.0):
        assert isinstance(gamma, (float, int))
assert gamma > 0
self.gamma = gamma
inv_gamma = 1.0 / gamma
self.table = np.array([(i / 255.0)**inv_gamma * 255
for i in np.arange(256)]).astype('uint8')
def __call__(self, results):
"""Call function to process the image with gamma correction.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Processed results.
"""
results['img'] = mmcv.lut_transform(
np.array(results['img'], dtype=np.uint8), self.table)
return results
def __repr__(self):
return self.__class__.__name__ + f'(gamma={self.gamma})'
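# Worked example (illustrative): with gamma=2.0, inv_gamma=0.5, so the lookup
# table maps an input value of 128 to (128 / 255) ** 0.5 * 255 ~= 180.7,
# stored as the uint8 value 180 -- i.e. mid-tones are brightened.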
@PIPELINES.register_module()
class SegRescale(object):
"""Rescale semantic segmentation maps.
Args:
scale_factor (float): The scale factor of the final output.
"""
def __init__(self, scale_factor=1):
self.scale_factor = scale_factor
def __call__(self, results):
"""Call function to scale the semantic segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with semantic segmentation map scaled.
"""
for key in results.get('seg_fields', []):
if self.scale_factor != 1:
results[key] = mmcv.imrescale(
results[key], self.scale_factor, interpolation='nearest')
return results
def __repr__(self):
return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
@PIPELINES.register_module()
class PhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
"""
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def convert(self, img, alpha=1, beta=0):
"""Multiple with alpha and add beat with clip."""
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(self, img):
"""Brightness distortion."""
if random.randint(2):
return self.convert(
img,
beta=random.uniform(-self.brightness_delta,
self.brightness_delta))
return img
def contrast(self, img):
"""Contrast distortion."""
if random.randint(2):
return self.convert(
img,
alpha=random.uniform(self.contrast_lower, self.contrast_upper))
return img
def saturation(self, img):
"""Saturation distortion."""
if random.randint(2):
img = mmcv.bgr2hsv(img)
img[:, :, 1] = self.convert(
img[:, :, 1],
alpha=random.uniform(self.saturation_lower,
self.saturation_upper))
img = mmcv.hsv2bgr(img)
return img
def hue(self, img):
"""Hue distortion."""
if random.randint(2):
img = mmcv.bgr2hsv(img)
img[:, :,
0] = (img[:, :, 0].astype(int) +
random.randint(-self.hue_delta, self.hue_delta)) % 180
img = mmcv.hsv2bgr(img)
return img
def __call__(self, results):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
img = results['img']
# random brightness
img = self.brightness(img)
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = random.randint(2)
if mode == 1:
img = self.contrast(img)
# random saturation
img = self.saturation(img)
# random hue
img = self.hue(img)
# random contrast
if mode == 0:
img = self.contrast(img)
results['img'] = img
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(brightness_delta={self.brightness_delta}, '
f'contrast_range=({self.contrast_lower}, '
f'{self.contrast_upper}), '
f'saturation_range=({self.saturation_lower}, '
f'{self.saturation_upper}), '
f'hue_delta={self.hue_delta})')
return repr_str
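# Illustrative pipeline entry using the defaults defined above:
#
#   dict(type='PhotoMetricDistortion',
#        brightness_delta=32,
#        contrast_range=(0.5, 1.5),
#        saturation_range=(0.5, 1.5),
#        hue_delta=18)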
@PIPELINES.register_module()
class RandomCutOut(object):
"""CutOut operation.
Randomly drop some regions of image used in
`Cutout <https://arxiv.org/abs/1708.04552>`_.
Args:
prob (float): cutout probability.
n_holes (int | tuple[int, int]): Number of regions to be dropped.
If it is given as a list, number of holes will be randomly
selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
shape of dropped regions. It can be `tuple[int, int]` to use a
fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
shape from the list.
cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
candidate ratio of dropped regions. It can be `tuple[float, float]`
to use a fixed ratio or `list[tuple[float, float]]` to randomly
choose ratio from the list. Please note that `cutout_shape`
and `cutout_ratio` cannot be both given at the same time.
fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
of pixel to fill in the dropped regions. Default: (0, 0, 0).
seg_fill_in (int): The labels of pixel to fill in the dropped regions.
If seg_fill_in is None, skip. Default: None.
"""
def __init__(self,
prob,
n_holes,
cutout_shape=None,
cutout_ratio=None,
fill_in=(0, 0, 0),
seg_fill_in=None):
        assert 0 <= prob <= 1
assert (cutout_shape is None) ^ (cutout_ratio is None), \
'Either cutout_shape or cutout_ratio should be specified.'
assert (isinstance(cutout_shape, (list, tuple))
or isinstance(cutout_ratio, (list, tuple)))
if isinstance(n_holes, tuple):
assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
else:
n_holes = (n_holes, n_holes)
if seg_fill_in is not None:
assert (isinstance(seg_fill_in, int) and 0 <= seg_fill_in
and seg_fill_in <= 255)
self.prob = prob
self.n_holes = n_holes
self.fill_in = fill_in
self.seg_fill_in = seg_fill_in
self.with_ratio = cutout_ratio is not None
self.candidates = cutout_ratio if self.with_ratio else cutout_shape
if not isinstance(self.candidates, list):
self.candidates = [self.candidates]
def __call__(self, results):
"""Call function to drop some regions of image."""
        cutout = bool(np.random.rand() < self.prob)
if cutout:
h, w, c = results['img'].shape
n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
for _ in range(n_holes):
x1 = np.random.randint(0, w)
y1 = np.random.randint(0, h)
index = np.random.randint(0, len(self.candidates))
if not self.with_ratio:
cutout_w, cutout_h = self.candidates[index]
else:
cutout_w = int(self.candidates[index][0] * w)
cutout_h = int(self.candidates[index][1] * h)
x2 = np.clip(x1 + cutout_w, 0, w)
y2 = np.clip(y1 + cutout_h, 0, h)
results['img'][y1:y2, x1:x2, :] = self.fill_in
if self.seg_fill_in is not None:
for key in results.get('seg_fields', []):
results[key][y1:y2, x1:x2] = self.seg_fill_in
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob}, '
repr_str += f'n_holes={self.n_holes}, '
repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
else f'cutout_shape={self.candidates}, ')
repr_str += f'fill_in={self.fill_in}, '
repr_str += f'seg_fill_in={self.seg_fill_in})'
return repr_str
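# Illustrative pipeline entry (values are examples, not taken from this
# file): with probability 0.5, drop 1-3 regions, each sized 5% or 10% of the
# image in both dimensions:
#
#   dict(type='RandomCutOut', prob=0.5, n_holes=(1, 3),
#        cutout_ratio=[(0.05, 0.05), (0.1, 0.1)])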
@PIPELINES.register_module()
class RandomMosaic(object):
"""Mosaic augmentation. Given 4 images, mosaic transform combines them into
one output image. The output image is composed of the parts from each sub-
image.
.. code:: text
mosaic transform
center_x
+------------------------------+
| pad | pad |
| +-----------+ |
| | | |
| | image1 |--------+ |
| | | | |
| | | image2 | |
center_y |----+-------------+-----------|
| | cropped | |
|pad | image3 | image4 |
| | | |
+----|-------------+-----------+
| |
+-------------+
The mosaic transform steps are as follows:
1. Choose the mosaic center as the intersections of 4 images
2. Get the left top image according to the index, and randomly
sample another 3 images from the custom dataset.
3. Sub image will be cropped if image is larger than mosaic patch
Args:
prob (float): mosaic probability.
img_scale (Sequence[int]): Image size after mosaic pipeline of
a single image. The size of the output image is four times
that of a single image. The output image comprises 4 single images.
Default: (640, 640).
center_ratio_range (Sequence[float]): Center ratio range of mosaic
output. Default: (0.5, 1.5).
pad_val (int): Pad value. Default: 0.
seg_pad_val (int): Pad value of segmentation map. Default: 255.
"""
def __init__(self,
prob,
img_scale=(640, 640),
center_ratio_range=(0.5, 1.5),
pad_val=0,
seg_pad_val=255):
        assert 0 <= prob <= 1
assert isinstance(img_scale, tuple)
self.prob = prob
self.img_scale = img_scale
self.center_ratio_range = center_ratio_range
self.pad_val = pad_val
self.seg_pad_val = seg_pad_val
def __call__(self, results):
"""Call function to make a mosaic of image.
Args:
results (dict): Result dict.
Returns:
dict: Result dict with mosaic transformed.
"""
        mosaic = bool(np.random.rand() < self.prob)
if mosaic:
results = self._mosaic_transform_img(results)
results = self._mosaic_transform_seg(results)
return results
def get_indexes(self, dataset):
"""Call function to collect indexes.
Args:
dataset (:obj:`MultiImageMixDataset`): The dataset.
Returns:
list: indexes.
"""
indexes = [random.randint(0, len(dataset)) for _ in range(3)]
return indexes
def _mosaic_transform_img(self, results):
"""Mosaic transform function.
Args:
results (dict): Result dict.
Returns:
dict: Updated result dict.
"""
assert 'mix_results' in results
if len(results['img'].shape) == 3:
mosaic_img = np.full(
(int(self.img_scale[0] * 2), int(self.img_scale[1] * 2), 3),
self.pad_val,
dtype=results['img'].dtype)
else:
mosaic_img = np.full(
(int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)),
self.pad_val,
dtype=results['img'].dtype)
# mosaic center x, y
self.center_x = int(
random.uniform(*self.center_ratio_range) * self.img_scale[1])
self.center_y = int(
random.uniform(*self.center_ratio_range) * self.img_scale[0])
center_position = (self.center_x, self.center_y)
loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')
for i, loc in enumerate(loc_strs):
if loc == 'top_left':
result_patch = copy.deepcopy(results)
else:
result_patch = copy.deepcopy(results['mix_results'][i - 1])
img_i = result_patch['img']
h_i, w_i = img_i.shape[:2]
# keep_ratio resize
scale_ratio_i = min(self.img_scale[0] / h_i,
self.img_scale[1] / w_i)
img_i = mmcv.imresize(
img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))
# compute the combine parameters
paste_coord, crop_coord = self._mosaic_combine(
loc, center_position, img_i.shape[:2][::-1])
x1_p, y1_p, x2_p, y2_p = paste_coord
x1_c, y1_c, x2_c, y2_c = crop_coord
# crop and paste image
mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]
results['img'] = mosaic_img
results['img_shape'] = mosaic_img.shape
results['ori_shape'] = mosaic_img.shape
return results
def _mosaic_transform_seg(self, results):
"""Mosaic transform function for label annotations.
Args:
results (dict): Result dict.
Returns:
dict: Updated result dict.
"""
assert 'mix_results' in results
for key in results.get('seg_fields', []):
mosaic_seg = np.full(
(int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)),
self.seg_pad_val,
dtype=results[key].dtype)
# mosaic center x, y
center_position = (self.center_x, self.center_y)
loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')
for i, loc in enumerate(loc_strs):
if loc == 'top_left':
result_patch = copy.deepcopy(results)
else:
result_patch = copy.deepcopy(results['mix_results'][i - 1])
gt_seg_i = result_patch[key]
h_i, w_i = gt_seg_i.shape[:2]
# keep_ratio resize
scale_ratio_i = min(self.img_scale[0] / h_i,
self.img_scale[1] / w_i)
gt_seg_i = mmcv.imresize(
gt_seg_i,
(int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)),
interpolation='nearest')
# compute the combine parameters
paste_coord, crop_coord = self._mosaic_combine(
loc, center_position, gt_seg_i.shape[:2][::-1])
x1_p, y1_p, x2_p, y2_p = paste_coord
x1_c, y1_c, x2_c, y2_c = crop_coord
# crop and paste image
mosaic_seg[y1_p:y2_p, x1_p:x2_p] = gt_seg_i[y1_c:y2_c,
x1_c:x2_c]
results[key] = mosaic_seg
return results
def _mosaic_combine(self, loc, center_position_xy, img_shape_wh):
"""Calculate global coordinate of mosaic image and local coordinate of
cropped sub-image.
Args:
loc (str): Index for the sub-image, loc in ('top_left',
'top_right', 'bottom_left', 'bottom_right').
center_position_xy (Sequence[float]): Mixing center for 4 images,
(x, y).
img_shape_wh (Sequence[int]): Width and height of sub-image
Returns:
tuple[tuple[float]]: Corresponding coordinate of pasting and
cropping
- paste_coord (tuple): paste corner coordinate in mosaic image.
- crop_coord (tuple): crop corner coordinate in mosaic image.
"""
assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right')
if loc == 'top_left':
# index0 to top left part of image
x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \
max(center_position_xy[1] - img_shape_wh[1], 0), \
center_position_xy[0], \
center_position_xy[1]
crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - (
y2 - y1), img_shape_wh[0], img_shape_wh[1]
elif loc == 'top_right':
# index1 to top right part of image
x1, y1, x2, y2 = center_position_xy[0], \
max(center_position_xy[1] - img_shape_wh[1], 0), \
min(center_position_xy[0] + img_shape_wh[0],
self.img_scale[1] * 2), \
center_position_xy[1]
crop_coord = 0, img_shape_wh[1] - (y2 - y1), min(
img_shape_wh[0], x2 - x1), img_shape_wh[1]
elif loc == 'bottom_left':
# index2 to bottom left part of image
x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \
center_position_xy[1], \
center_position_xy[0], \
min(self.img_scale[0] * 2, center_position_xy[1] +
img_shape_wh[1])
crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min(
y2 - y1, img_shape_wh[1])
else:
# index3 to bottom right part of image
x1, y1, x2, y2 = center_position_xy[0], \
center_position_xy[1], \
min(center_position_xy[0] + img_shape_wh[0],
self.img_scale[1] * 2), \
min(self.img_scale[0] * 2, center_position_xy[1] +
img_shape_wh[1])
crop_coord = 0, 0, min(img_shape_wh[0],
x2 - x1), min(y2 - y1, img_shape_wh[1])
paste_coord = x1, y1, x2, y2
return paste_coord, crop_coord
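    # Worked example (illustrative): with img_scale=(640, 640) the mosaic
    # canvas is 1280x1280. For center_position_xy=(640, 640) and a 'top_left'
    # sub-image of img_shape_wh=(640, 480):
    #   paste_coord = (0, 160, 640, 640)  # canvas region above-left of center
    #   crop_coord  = (0, 0, 640, 480)    # the whole sub-image fits, no crop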
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob}, '
repr_str += f'img_scale={self.img_scale}, '
repr_str += f'center_ratio_range={self.center_ratio_range}, '
repr_str += f'pad_val={self.pad_val}, '
        repr_str += f'seg_pad_val={self.seg_pad_val})'
return repr_str
# =============================================================================
# File: /src/mmdeploy/mmdeploy/codebase/mmcls/models/heads/multi_label_head.py
# Repo: fengbingchun/PyTorch_Test (Apache-2.0)
# =============================================================================
# Copyright (c) OpenMMLab. All rights reserved.
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
func_name='mmcls.models.heads.MultiLabelClsHead.post_process')
def multi_label_cls_head__post_process(ctx, self, pred, **kwargs):
"""Rewrite `post_process` of MultiLabelClsHead for default backend.
Rewrite this function to directly return pred.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
pred (Tensor): Predict result of model.
Returns:
pred (Tensor): Result of MultiLabelClsHead. The tensor
shape (batch_size,num_classes).
"""
return pred
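# Note (not part of the original file): this rewriter takes effect when
# mmdeploy's rewriting is active (e.g. inside a RewriterContext during model
# export); the exported graph then returns the raw prediction tensor and the
# original post-processing of MultiLabelClsHead is skipped.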
# =============================================================================
# File: /quantarhei/core/implementations.py
# Repo: saayeh/quantarhei (no license specified)
# =============================================================================
# -*- coding: utf-8 -*-
from functools import wraps
from importlib import import_module
from .managers import Manager
def implementation(package="",
taskname="",
at_runtime=False,
fallback_local=False,
always_local=False):
"""Decorator to select numerical implememtation
"""
m = Manager()
def decorate_at_runtime(func):
"""Decoration at run time
The wrapper decides which function to return at runtime.
"""
@wraps(func)
def wrapper(*arg,**kwargs):
fc = get_function(func,package,taskname,
default_local=fallback_local,
always_local=always_local)
return fc(*arg,**kwargs)
return wrapper
def decorate_at_loadtime(func):
"""Decoration at load time
The wrapper decides which function to return when the Manager module
is loaded, i.e. at the start of the application.
"""
fc = get_function(func,package,taskname,
default_local=fallback_local,
always_local=always_local)
@wraps(func)
def wrapper(*arg,**kwargs):
return fc(*arg,**kwargs)
return wrapper
if (at_runtime and m.change_implementation_at_runtime):
return decorate_at_runtime
else:
return decorate_at_loadtime
#
# Auxiliary function
#
def load_function(lib,fce):
"""Load the module and get the desired function
"""
a = import_module(lib)
if hasattr(a,fce):
fc = getattr(a,fce)
else:
raise Exception("Cannot reach implementation of %s " % fce)
return fc
def get_function(func,package,taskname,default_local,always_local):
"""Decide which function to use
"""
if always_local:
return func
m = Manager()
# default implementation package
default_imp_prefix = "quantarhei.implementations.python"
# decide which implementation will be used
imp_prefix = m.get_implementation_prefix(package=package,
taskname=taskname)
# load the package
try:
imp_name = imp_prefix + "." + package
fc = load_function(imp_name,taskname)
    except Exception:
try:
# fall back on pure Python implementation
if default_local:
fc = func
else:
imp_name = default_imp_prefix + "." + package
fc = load_function(imp_name,taskname)
# FIXME: issue a warning
print("WARNING: import failed, falling back on pure Python")
        except Exception:
# do not provide implementation, call the decorated function itself
# FIXME: issue a warning (this is an unwanted result)
fc = func
return fc
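# Usage sketch (illustrative; the package/task names below are hypothetical
# and must match a module under the configured implementation prefix, e.g.
# quantarhei.implementations.python.<package> providing <taskname>):
#
#   @implementation(package="propagators", taskname="propagate",
#                   at_runtime=True, fallback_local=True)
#   def propagate(rho0, ham):
#       ...  # pure-Python reference implementation, used as the fallback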
# =============================================================================
# File: /Language/python/module/pybluez/rfcomm-client.py
# Repo: chengyi818/kata (no license specified)
# =============================================================================
import bluetooth
server_address = "00:1A:7D:DA:71:11"
port = 1
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sock.connect((server_address, port))
sock.send("hello world!")
sock.close()
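# A matching RFCOMM server would look roughly like this (assumed counterpart,
# not part of this snippet; these are standard PyBluez server-side calls):
#
#   server_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
#   server_sock.bind(("", port))
#   server_sock.listen(1)
#   client_sock, client_info = server_sock.accept()
#   data = client_sock.recv(1024)
#   client_sock.close()
#   server_sock.close()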
# =============================================================================
# File: /Python_codes/p03693/s330550034.py
# Repo: Aasthaengg/IBMdataset (no license specified)
# =============================================================================
print(['NO','YES'][int(''.join(input().split()))%4==0])
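# Readable equivalent (illustrative): the three space-separated digits r g b
# form the number 100*r + 10*g + b; print YES iff it is a multiple of 4:
#
#   r, g, b = input().split()
#   print('YES' if int(r + g + b) % 4 == 0 else 'NO')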
"[email protected]"
] | |
8cc11edbf4514684f0ccebeb30a0086a8925dce2 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_21649.py | 40532dcb7c23f6e6d1294bf9a3247202883f3fe7 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | # How to use regular expressions to only capture a word by itself rather than in another word?
import re
print re.subn('Co$','',"Company & Co")
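# The question above asks for whole-word matching; 'Co$' only handles the
# end-of-string case. The standard fix is a word boundary (\b):
print re.subn(r'\bCo\b', '', "Company & Co")  # -> ('Company & ', 1)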
# =============================================================================
# File: /SimG4CMS/Calo/test/python/runWithGun_cfg.py
# Repo: amkalsi/cmssw (permissive)
# =============================================================================
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.load("SimG4CMS.Calo.pythiapdt_cfi")
#process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
process.load("Geometry.CMSCommonData.cmsIdealGeometryAPD1XML_cfi")
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load("SimG4Core.Application.g4SimHits_cfi")
process.load("SimG4CMS.Calo.CaloSimHitStudy_cfi")
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('cout'),
categories = cms.untracked.vstring('CaloSim', 'EcalGeom', 'EcalSim',
'HCalGeom', 'HcalSim', 'HFShower',
'SimG4CoreApplication', 'HitStudy',
'G4cout', 'G4cerr', 'SimTrackManager'),
# debugModules = cms.untracked.vstring('*'),
cout = cms.untracked.PSet(
# threshold = cms.untracked.string('DEBUG'),
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
DEBUG = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
G4cerr = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
G4cout = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
SimTrackManager = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
SimG4CoreApplication = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
HitStudy = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
CaloSim = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
EcalGeom = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
EcalSim = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
HCalGeom = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
HFShower = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
HcalSim = cms.untracked.PSet(
limit = cms.untracked.int32(0)
)
)
)
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876
process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2)
)
process.source = cms.Source("EmptySource",
firstRun = cms.untracked.uint32(1),
firstEvent = cms.untracked.uint32(1)
)
process.generator = cms.EDProducer("FlatRandomPtGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(211),
MinEta = cms.double(-3.0),
MaxEta = cms.double(3.0),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinPt = cms.double(100.),
MaxPt = cms.double(100.)
),
Verbosity = cms.untracked.int32(0),
AddAntiParticle = cms.bool(False)
)
process.o1 = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
fileName = cms.untracked.string('simevent_QGSP_FTFP_BERT_EML.root')
)
process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
oncePerEventMode = cms.untracked.bool(True),
showMallocInfo = cms.untracked.bool(True),
dump = cms.untracked.bool(True),
ignoreTotal = cms.untracked.int32(1)
)
process.Tracer = cms.Service("Tracer")
process.TFileService = cms.Service("TFileService",
fileName = cms.string('runWithGun_QGSP_FTFP_BERT_EML.root')
)
process.common_maximum_timex = cms.PSet(
MaxTrackTime = cms.double(1000.0),
MaxTimeNames = cms.vstring(),
MaxTrackTimes = cms.vdouble()
)
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.g4SimHits*process.caloSimHitStudy)
process.outpath = cms.EndPath(process.o1)
process.caloSimHitStudy.MaxEnergy = 1000.0
#process.g4SimHits.Physics.type = 'SimG4Core/Physics/QGSP_FTFP_BERT_EML'
process.g4SimHits.Physics.MonopoleCharge = 1
process.g4SimHits.Physics.Verbosity = 0
process.g4SimHits.CaloSD.UseResponseTables = [1,1,0,1]
process.g4SimHits.CaloSD.EminHits[0] = 0
process.g4SimHits.ECalSD.StoreSecondary = True
process.g4SimHits.CaloTrkProcessing.PutHistory = True
process.g4SimHits.CaloResponse.UseResponseTable = True
process.g4SimHits.CaloResponse.ResponseScale = 1.0
process.g4SimHits.CaloResponse.ResponseFile = 'SimG4CMS/Calo/data/responsTBpim50.dat'
process.g4SimHits.G4Commands = ['/run/verbose 2']
process.g4SimHits.StackingAction = cms.PSet(
process.common_heavy_suppression,
process.common_maximum_timex,
KillDeltaRay = cms.bool(True),
TrackNeutrino = cms.bool(False),
KillHeavy = cms.bool(False),
SaveFirstLevelSecondary = cms.untracked.bool(True),
SavePrimaryDecayProductsAndConversionsInTracker = cms.untracked.bool(True),
SavePrimaryDecayProductsAndConversionsInCalo = cms.untracked.bool(True),
SavePrimaryDecayProductsAndConversionsInMuon = cms.untracked.bool(True)
)
process.g4SimHits.SteppingAction = cms.PSet(
process.common_maximum_timex,
KillBeamPipe = cms.bool(False),
CriticalEnergyForVacuum = cms.double(0.0),
CriticalDensity = cms.double(1e-15),
EkinNames = cms.vstring(),
EkinThresholds = cms.vdouble(),
EkinParticles = cms.vstring(),
Verbosity = cms.untracked.int32(2)
)
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
CheckForHighEtPhotons = cms.untracked.bool(False),
TrackMin = cms.untracked.int32(0),
TrackMax = cms.untracked.int32(0),
TrackStep = cms.untracked.int32(1),
EventMin = cms.untracked.int32(0),
EventMax = cms.untracked.int32(0),
EventStep = cms.untracked.int32(1),
PDGids = cms.untracked.vint32(),
VerboseLevel = cms.untracked.int32(0),
G4Verbose = cms.untracked.bool(True),
DEBUG = cms.untracked.bool(False),
type = cms.string('TrackingVerboseAction')
))
# =============================================================================
# File: /pypureclient/flasharray/FA_2_25/models/file_system_response.py
# Repo: PureStorage-OpenConnect/py-pure-client (BSD-2-Clause)
# =============================================================================
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.25
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_25 import models
class FileSystemResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[FileSystem]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.FileSystem]
):
"""
Keyword args:
items (list[FileSystem]): Displays a list of all items after filtering. If applicable, the values are displayed for each name.
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `FileSystemResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `FileSystemResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `FileSystemResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `FileSystemResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FileSystemResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FileSystemResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
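# Minimal usage sketch (illustrative; in practice instances are built by the
# generated client from API responses rather than constructed by hand):
#
#   resp = FileSystemResponse(items=[])
#   resp.to_dict()        # -> {'items': []}
#   list(resp.keys())     # -> ['items']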
# =============================================================================
# File: /backup/user_360/ch27_2020_03_30_19_57_47_608648.py
# Repo: gabriellaec/desoft-analise-exercicios (no license specified)
# =============================================================================
tem_duvida = True
while tem_duvida:
perg = input("Tem dúvidas?")
if perg != 'não':
print("Pratique mais")
else:
print('Até a próxima')
        tem_duvida = False
# =============================================================================
# File: /modules/templates/DRKCM/rheaders.py
# Repo: nursix/drkcm (MIT)
# =============================================================================
"""
Custom rheaders for DRKCM
License: MIT
"""
from gluon import current, A, DIV, SPAN, URL
# =============================================================================
def drk_cr_rheader(r, tabs=None):
""" CR custom resource headers """
if r.representation != "html":
# Resource headers only used in interactive views
return None
from core import s3_rheader_resource, S3ResourceHeader
tablename, record = s3_rheader_resource(r)
if tablename != r.tablename:
resource = current.s3db.resource(tablename, id=record.id)
else:
resource = r.resource
rheader = None
rheader_fields = []
if record:
T = current.T
if tablename == "cr_shelter":
if not tabs:
tabs = [(T("Basic Details"), None),
]
rheader_fields = [["name",
],
["organisation_id",
],
["location_id",
],
]
rheader = S3ResourceHeader(rheader_fields, tabs)(r,
table=resource.table,
record=record,
)
return rheader
# =============================================================================
def drk_dvr_rheader(r, tabs=None):
""" DVR custom resource headers """
if r.representation != "html":
# Resource headers only used in interactive views
return None
from core import s3_rheader_resource, \
S3ResourceHeader, \
s3_fullname
from .uioptions import get_ui_options
tablename, record = s3_rheader_resource(r)
if tablename != r.tablename:
resource = current.s3db.resource(tablename, id=record.id)
else:
resource = r.resource
rheader = None
rheader_fields = []
if record:
T = current.T
record_id = record.id
if tablename == "pr_person":
# UI Options and ability to read cases from multiple orgs
ui_opts = get_ui_options()
ui_opts_get = ui_opts.get
from .helpers import case_read_multiple_orgs
multiple_orgs = case_read_multiple_orgs()[0]
if not tabs:
activity_tab_label = ui_opts_get("activity_tab_label")
if activity_tab_label:
ACTIVITIES = T(activity_tab_label)
else:
ACTIVITIES = T("Counseling Reasons")
# Basic Case Documentation
tabs = [(T("Basic Details"), None),
(T("Contact Info"), "contacts"),
(T("Family Members"), "group_membership/"),
(ACTIVITIES, "case_activity"),
]
# Optional Case Documentation
if ui_opts_get("case_use_response_tab"):
tabs.append((T("Actions"), "response_action"))
if ui_opts_get("case_use_tasks"):
tabs.append((T("ToDo"), "todo"))
if ui_opts_get("case_use_appointments"):
tabs.append((T("Appointments"), "case_appointment"))
if ui_opts_get("case_use_service_contacts"):
tabs.append((T("Service Contacts"), "service_contact"))
if ui_opts_get("case_use_photos_tab"):
tabs.append((T("Photos"), "image"))
# Uploads
tabs.append((T("Documents"), "document/"))
# Notes etc.
if ui_opts_get("case_use_notes"):
tabs.append((T("Notes"), "case_note"))
# Get the record data
lodging_opt = ui_opts_get("case_lodging")
if lodging_opt == "site":
lodging_sel = "dvr_case.site_id"
lodging_col = "dvr_case.site_id"
elif lodging_opt == "text":
lodging_sel = "case_details.lodging"
lodging_col = "dvr_case_details.lodging"
else:
lodging_sel = None
lodging_col = None
if ui_opts_get("case_use_flags"):
flags_sel = "dvr_case_flag_case.flag_id"
else:
flags_sel = None
if ui_opts_get("case_use_place_of_birth"):
pob_sel = "person_details.place_of_birth"
else:
pob_sel = None
if ui_opts_get("case_use_bamf"):
bamf_sel = "bamf.value"
else:
bamf_sel = None
case = resource.select(["first_name",
"last_name",
"dvr_case.status_id",
"dvr_case.archived",
"dvr_case.household_size",
"dvr_case.organisation_id",
"case_details.arrival_date",
bamf_sel,
"person_details.nationality",
pob_sel,
lodging_sel,
flags_sel,
],
represent = True,
raw_data = True,
).rows
if case:
# Extract case data
case = case[0]
name = lambda person: s3_fullname(person, truncate=False)
raw = case["_row"]
case_status = lambda row: case["dvr_case.status_id"]
archived = raw["dvr_case.archived"]
organisation = lambda row: case["dvr_case.organisation_id"]
arrival_date = lambda row: case["dvr_case_details.arrival_date"]
household_size = lambda row: case["dvr_case.household_size"]
nationality = lambda row: case["pr_person_details.nationality"]
# Warn if nationality is lacking while mandatory
if ui_opts_get("case_nationality_mandatory") and \
raw["pr_person_details.nationality"] is None:
current.response.warning = T("Nationality lacking!")
bamf = lambda row: case["pr_bamf_person_tag.value"]
if pob_sel:
place_of_birth = lambda row: case["pr_person_details.place_of_birth"]
else:
place_of_birth = None
if lodging_col:
lodging = (T("Lodging"), lambda row: case[lodging_col])
else:
lodging = None
if flags_sel:
flags = lambda row: case["dvr_case_flag_case.flag_id"]
else:
flags = None
else:
# Target record exists, but doesn't match filters
return None
arrival_date_label = ui_opts_get("case_arrival_date_label")
arrival_date_label = T(arrival_date_label) \
if arrival_date_label else T("Date of Entry")
# Adaptive rheader-fields
rheader_fields = [[None,
(T("Nationality"), nationality),
(T("Case Status"), case_status)],
[None, None, None],
[None, None, None],
]
if ui_opts_get("case_use_pe_label"):
rheader_fields[0][0] = (T("ID"), "pe_label")
rheader_fields[1][0] = "date_of_birth"
else:
rheader_fields[0][0] = "date_of_birth"
if pob_sel:
pob_row = 1 if rheader_fields[1][0] is None else 2
rheader_fields[pob_row][0] = (T("Place of Birth"), place_of_birth)
if bamf_sel:
doe_row = 2
rheader_fields[1][1] = (T("BAMF-Az"), bamf)
else:
doe_row = 1
rheader_fields[doe_row][1] = (arrival_date_label, arrival_date)
if lodging:
rheader_fields[1][2] = lodging
if ui_opts_get("case_show_total_consultations"):
from .helpers import get_total_consultations
total_consultations = (T("Number of Consultations"), get_total_consultations)
if rheader_fields[1][2] is None:
rheader_fields[1][2] = total_consultations
else:
rheader_fields[0].append(total_consultations)
hhsize = (T("Size of Family"), household_size)
if rheader_fields[1][0] is None:
rheader_fields[1][0] = hhsize
elif rheader_fields[2][0] is None:
rheader_fields[2][0] = hhsize
elif rheader_fields[1][2] is None:
rheader_fields[1][2] = hhsize
else:
rheader_fields[2][2] = hhsize
colspan = 5
if multiple_orgs:
# Show organisation if user can see cases from multiple orgs
rheader_fields.insert(0, [(T("Organization"), organisation, colspan)])
if flags_sel:
rheader_fields.append([(T("Flags"), flags, colspan)])
if ui_opts_get("case_header_protection_themes"):
from .helpers import get_protection_themes
rheader_fields.append([(T("Protection Need"),
get_protection_themes,
colspan,
)])
if archived:
# "Case Archived" hint
hint = lambda record: SPAN(T("Invalid Case"), _class="invalid-case")
rheader_fields.insert(0, [(None, hint)])
# Generate rheader XML
rheader = S3ResourceHeader(rheader_fields, tabs, title=name)(
r,
table = resource.table,
record = record,
)
# Add profile picture
from core import s3_avatar_represent
rheader.insert(0, A(s3_avatar_represent(record_id,
"pr_person",
_class = "rheader-avatar",
_width = 60,
_height = 60,
),
_href=URL(f = "person",
args = [record_id, "image"],
vars = r.get_vars,
),
)
)
return rheader
elif tablename == "dvr_case":
if not tabs:
tabs = [(T("Basic Details"), None),
(T("Activities"), "case_activity"),
]
rheader_fields = [["reference"],
["status_id"],
]
rheader = S3ResourceHeader(rheader_fields, tabs)(r,
table=resource.table,
record=record,
)
return rheader
# =============================================================================
def drk_org_rheader(r, tabs=None):
""" ORG custom resource headers """
if r.representation != "html":
# Resource headers only used in interactive views
return None
from core import s3_rheader_resource, s3_rheader_tabs, S3ResourceHeader
from .uioptions import get_ui_options
s3db = current.s3db
tablename, record = s3_rheader_resource(r)
if tablename != r.tablename:
resource = s3db.resource(tablename, id=record.id)
else:
resource = r.resource
rheader = None
rheader_fields = []
if record:
T = current.T
record_id = record.id
ui_options = get_ui_options()
is_admin = current.auth.s3_has_role("ADMIN")
if tablename == "org_organisation":
table = resource.table
if record.root_organisation == record_id:
branch = False
else:
branch = True
# Custom tabs
tabs = [(T("Basic Details"), None),
(T("Branches"), "branch"),
(T("Facilities"), "facility"),
(T("Staff & Volunteers"), "human_resource"),
#(T("Projects"), "project"),
(T("Counseling Themes"), "response_theme"),
]
if is_admin or \
ui_options.get("response_themes_needs") or \
ui_options.get("activity_use_need"):
# Ability to manage org-specific need types
# as they are used in themes:
tabs.append((T("Counseling Reasons"), "need"))
if not branch and \
(is_admin or \
ui_options.get("case_document_templates") and \
current.auth.s3_has_role("ORG_ADMIN")):
tabs.append((T("Document Templates"), "document"))
rheader_tabs = s3_rheader_tabs(r, tabs)
# Custom header
            from gluon import A, DIV, TABLE, TD, TH, TR, URL
rheader = DIV()
# Name
record_data = TABLE(TR(TH("%s: " % table.name.label),
record.name,
),
)
# Parent Organisation
if branch:
btable = s3db.org_organisation_branch
query = (btable.branch_id == record_id) & \
(btable.organisation_id == table.id)
row = current.db(query).select(table.id,
table.name,
limitby = (0, 1),
).first()
if row:
record_data.append(TR(TH("%s: " % T("Branch of")),
A(row.name, _href=URL(args=[row.id, "read"])),
))
# Website as link
if record.website:
record_data.append(TR(TH("%s: " % table.website.label),
A(record.website, _href=record.website)))
logo = s3db.org_organisation_logo(record)
if logo:
rheader.append(TABLE(TR(TD(logo),
TD(record_data),
)))
else:
rheader.append(record_data)
rheader.append(rheader_tabs)
return rheader
elif tablename == "org_facility":
if not tabs:
tabs = [(T("Basic Details"), None),
]
rheader_fields = [["name", "email"],
["organisation_id", "phone1"],
["location_id", "phone2"],
]
rheader = S3ResourceHeader(rheader_fields, tabs)(r,
table=resource.table,
record=record,
)
return rheader
# END =========================================================================
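# Illustrative wiring sketch (not part of this module): in Sahana/Eden
# templates an rheader such as drk_dvr_rheader above is usually attached in
# a controller customisation hook. The hook below follows that convention
# but is an assumption, not code from this template.
#
# def customise_pr_person_controller(**attr):
#     attr["rheader"] = drk_dvr_rheader
#     return attr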
57a81a6f705289723249fb0b09e8a065b08ab8cf | 5fbf2adec8d7647b9aeefa51695aa3f13ee57810 | /server/util/ah_handlers.py | 455a1dd878527b50e58dde3861598691f56b2737 | [] | no_license | angelacantfly/dancedeets-monorepo | 8bb6579f6f5d30e88c8d4c0e239c6c8fed678094 | 6b7a48d91d0737010acd9e08a89d99c2c982205a | refs/heads/master | 2021-01-20T09:14:22.613044 | 2017-08-26T21:48:14 | 2017-08-26T21:48:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | import webapp2
import app
@app.route('/_ah/start')
class StartHandler(webapp2.RequestHandler):
def get(self):
pass
@app.route('/_ah/stop')
class StopHandler(webapp2.RequestHandler):
def get(self):
pass
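# Note: /_ah/start and /_ah/stop are App Engine's lifecycle hooks for
# manually-scaled instances; an empty 200 response is enough to signal
# that the instance is ready (or may be shut down).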
1c8ee601911e011097943b52fa643c5de3d37cf9 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/proc/memhist5min.py | 2003290fe47c7f53c9c7ed8a141092387a15e88c | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 10,843 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class MemHist5min(Mo):
"""
A class that represents historical statistics for memory in a 5 minute sampling interval. This class updates every 10 seconds.
"""
meta = StatsClassMeta("cobra.model.proc.MemHist5min", "memory")
counter = CounterMeta("current", CounterCategory.GAUGE, "gB", "Memory Allocated")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "currentMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "currentMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "currentAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "currentSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "currentThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "currentTr"
meta._counters.append(counter)
meta.moClassName = "procMemHist5min"
meta.rnFormat = "HDprocMem5min-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical memory stats in 5 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.proc.Entity")
meta.parentClasses.add("cobra.model.proc.Entry")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.proc.MemHist")
meta.rnPrefixes = [
('HDprocMem5min-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "currentAvg", "currentAvg", 10497, PropCategory.IMPLICIT_AVG)
prop.label = "Memory Allocated average value"
prop.isOper = True
prop.isStats = True
meta.props.add("currentAvg", prop)
prop = PropMeta("str", "currentMax", "currentMax", 10496, PropCategory.IMPLICIT_MAX)
prop.label = "Memory Allocated maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("currentMax", prop)
prop = PropMeta("str", "currentMin", "currentMin", 10495, PropCategory.IMPLICIT_MIN)
prop.label = "Memory Allocated minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("currentMin", prop)
prop = PropMeta("str", "currentSpct", "currentSpct", 10498, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Memory Allocated suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("currentSpct", prop)
prop = PropMeta("str", "currentThr", "currentThr", 10499, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Memory Allocated thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("currentThr", prop)
prop = PropMeta("str", "currentTr", "currentTr", 10500, PropCategory.IMPLICIT_TREND)
prop.label = "Memory Allocated trend"
prop.isOper = True
prop.isStats = True
meta.props.add("currentTr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 7037, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
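# Illustrative query sketch (not part of the generated model): reading
# procMemHist5min records with the cobra SDK. The APIC URL and credentials
# are placeholders.
#
# from cobra.mit.access import MoDirectory
# from cobra.mit.session import LoginSession
#
# modir = MoDirectory(LoginSession('https://apic', 'admin', 'password'))
# modir.login()
# for mo in modir.lookupByClass('procMemHist5min'):
#     print(mo.dn, mo.currentAvg, mo.currentMax)
# modir.logout()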
ffc1de01b564f7729799b45337e5d8ae9fbb92ee | 03330fc41b226e3b597676944b335a77f1979965 | /examples/using_xref.py | 05042801a9b20fdce5800a420dcd161fb80fed47 | [
"MIT"
] | permissive | ols3er/ezdxf | b00076742022b21118d3645685205fbdae419b38 | a01ed68ea45f25a231e470d239aefed73ab285d5 | refs/heads/master | 2020-05-29T16:57:18.235926 | 2019-02-24T03:41:09 | 2019-02-24T03:41:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # Copyright (c) 2018 Manfred Moitzi
# License: MIT License
import ezdxf
# AutoCAD 2010 can not resolve XREFS in DXF R12 Format :-(,
ref_dwg = ezdxf.new('R2000')
ref_dwg.modelspace().add_circle(center=(5, 5), radius=2.5)
ref_dwg.header['$INSBASE'] = (5, 5, 0) # set insertion point
ref_dwg.saveas("xref_drawing.dxf")
# XREF definition
host_dwg = ezdxf.new('R2000')
host_dwg.add_xref_def(filename='xref_drawing.dxf', name='my_xref')
host_dwg.modelspace().add_blockref(name='my_xref', insert=(0, 0))
host_dwg.saveas("using_xref.dxf")
46b6ba0aa78786fca3bae6b7c69830c69f629ac2 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_202/ch3_2020_02_21_14_51_08_594299.py | dd59ba6a96579ccaf3b3644f63149357f60d9fb2 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | import math
def calcula_gaussiana(x, u, o):
    return (math.e ** (-0.5 * ((x - u) / o) ** 2)) / (o * math.sqrt(2 * math.pi))
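# Sanity check: for the standard normal (u=0, o=1) the density at x=0 is
# 1/sqrt(2*pi) ~= 0.3989, which the corrected formula above reproduces.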
7524a1a94270cde413832fe855ab35bb965326de | ff67167c2a620a4da7bae8d945cebf4643f46186 | /cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_terminal_device_cfg.py | 43bc75e9dcadd2fe7ed79416905ba8b1a842376a | [
"Apache-2.0"
] | permissive | gaybro8777/ydk-py | ebbf4a15ac1d699d8c33492c96587c41b5159467 | fff6f9da0b37aea117856f415e1a6f1b5eba53cf | refs/heads/master | 2021-10-26T13:15:55.819384 | 2019-04-12T23:02:29 | 2019-04-12T23:02:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,596 | py | """ Cisco_IOS_XR_terminal_device_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR terminal\-device package configuration.
This module contains definitions
for the following management objects\:
logical\-channels\: Logical channel in mxp
optical\-channels\: optical channels
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class LogicalAdminState(Enum):
"""
LogicalAdminState (Enum Class)
Logical admin state
.. data:: enable = 1
Enable
.. data:: disable = 2
Disable
.. data:: maintenance = 3
Maintenance
"""
enable = Enum.YLeaf(1, "enable")
disable = Enum.YLeaf(2, "disable")
maintenance = Enum.YLeaf(3, "maintenance")
class LogicalChannelAssignment(Enum):
"""
LogicalChannelAssignment (Enum Class)
Logical channel assignment
.. data:: type_logical_channel = 1
Type Logical channel
.. data:: type_optical_channel = 2
Type Optical channel
"""
type_logical_channel = Enum.YLeaf(1, "type-logical-channel")
type_optical_channel = Enum.YLeaf(2, "type-optical-channel")
class LogicalChannelOtnTtiAuto(Enum):
"""
LogicalChannelOtnTtiAuto (Enum Class)
Logical channel otn tti auto
.. data:: false = 0
Otn tti auto mode false
.. data:: true = 1
Otn tti auto mode true
"""
false = Enum.YLeaf(0, "false")
true = Enum.YLeaf(1, "true")
class LogicalLoopbackMode(Enum):
"""
LogicalLoopbackMode (Enum Class)
Logical loopback mode
.. data:: none = 0
None
.. data:: facility = 1
Facility
.. data:: terminal = 2
Terminal
"""
none = Enum.YLeaf(0, "none")
facility = Enum.YLeaf(1, "facility")
terminal = Enum.YLeaf(2, "terminal")
class LogicalProtocol(Enum):
"""
LogicalProtocol (Enum Class)
Logical protocol
.. data:: type_ethernet = 1
Type Ethernet
.. data:: type_otn = 2
Type OTN
"""
type_ethernet = Enum.YLeaf(1, "type-ethernet")
type_otn = Enum.YLeaf(2, "type-otn")
class LogicalTribProtocol(Enum):
"""
LogicalTribProtocol (Enum Class)
Logical trib protocol
.. data:: trib_proto_type1ge = 1
1G Ethernet protocol
.. data:: trib_proto_type_oc48 = 2
OC48 protocol
.. data:: trib_proto_type_stm16 = 3
STM 16 protocol
.. data:: trib_proto_type10gelan = 4
10G Ethernet LAN protocol
.. data:: trib_proto_type10gewan = 5
10G Ethernet WAN protocol
.. data:: trib_proto_type_oc192 = 6
OC 192 (9.6GB) port protocol
.. data:: trib_proto_type_stm64 = 7
STM 64 protocol
.. data:: trib_proto_type_otu2 = 8
OTU 2 protocol
.. data:: trib_proto_type_otu2e = 9
OTU 2e protocol
.. data:: trib_proto_type_otu1e = 10
OTU 1e protocol
.. data:: trib_proto_type_odu2 = 11
ODU 2 protocol
.. data:: trib_proto_type_odu2e = 12
ODU 2e protocol
.. data:: trib_proto_type40ge = 13
40G Ethernet port protocol
.. data:: trib_proto_type_oc768 = 14
OC 768 protocol
.. data:: trib_proto_type_stm256 = 15
STM 256 protocol
.. data:: trib_proto_type_otu3 = 16
OTU 3 protocol
.. data:: trib_proto_type_odu3 = 17
ODU 3 protocol
.. data:: trib_proto_type100ge = 18
100G Ethernet protocol
.. data:: trib_proto_type100g_mlg = 19
100G MLG protocol
.. data:: trib_proto_type_otu4 = 20
OTU4 signal protocol (112G) for transporting
100GE signal
.. data:: trib_proto_type_otu_cn = 21
OTU Cn protocol
.. data:: trib_proto_type_odu4 = 22
ODU 4 protocol
"""
trib_proto_type1ge = Enum.YLeaf(1, "trib-proto-type1ge")
trib_proto_type_oc48 = Enum.YLeaf(2, "trib-proto-type-oc48")
trib_proto_type_stm16 = Enum.YLeaf(3, "trib-proto-type-stm16")
trib_proto_type10gelan = Enum.YLeaf(4, "trib-proto-type10gelan")
trib_proto_type10gewan = Enum.YLeaf(5, "trib-proto-type10gewan")
trib_proto_type_oc192 = Enum.YLeaf(6, "trib-proto-type-oc192")
trib_proto_type_stm64 = Enum.YLeaf(7, "trib-proto-type-stm64")
trib_proto_type_otu2 = Enum.YLeaf(8, "trib-proto-type-otu2")
trib_proto_type_otu2e = Enum.YLeaf(9, "trib-proto-type-otu2e")
trib_proto_type_otu1e = Enum.YLeaf(10, "trib-proto-type-otu1e")
trib_proto_type_odu2 = Enum.YLeaf(11, "trib-proto-type-odu2")
trib_proto_type_odu2e = Enum.YLeaf(12, "trib-proto-type-odu2e")
trib_proto_type40ge = Enum.YLeaf(13, "trib-proto-type40ge")
trib_proto_type_oc768 = Enum.YLeaf(14, "trib-proto-type-oc768")
trib_proto_type_stm256 = Enum.YLeaf(15, "trib-proto-type-stm256")
trib_proto_type_otu3 = Enum.YLeaf(16, "trib-proto-type-otu3")
trib_proto_type_odu3 = Enum.YLeaf(17, "trib-proto-type-odu3")
trib_proto_type100ge = Enum.YLeaf(18, "trib-proto-type100ge")
trib_proto_type100g_mlg = Enum.YLeaf(19, "trib-proto-type100g-mlg")
trib_proto_type_otu4 = Enum.YLeaf(20, "trib-proto-type-otu4")
trib_proto_type_otu_cn = Enum.YLeaf(21, "trib-proto-type-otu-cn")
trib_proto_type_odu4 = Enum.YLeaf(22, "trib-proto-type-odu4")
class LogicalTribRate(Enum):
"""
LogicalTribRate (Enum Class)
Logical trib rate
.. data:: trib_rate1g = 1
TribRate1G
.. data:: trib_rate2_5g = 2
TribRate25G
.. data:: trib_rate10g = 3
TribRate10G
.. data:: trib_rate40g = 4
TribRate40G
.. data:: trib_rate100g = 5
TribRate100G
"""
trib_rate1g = Enum.YLeaf(1, "trib-rate1g")
trib_rate2_5g = Enum.YLeaf(2, "trib-rate2-5g")
trib_rate10g = Enum.YLeaf(3, "trib-rate10g")
trib_rate40g = Enum.YLeaf(4, "trib-rate40g")
trib_rate100g = Enum.YLeaf(5, "trib-rate100g")
class LogicalChannels(Entity):
"""
Logical channel in mxp
.. attribute:: channel
Logical channel index
**type**\: list of :py:class:`Channel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalChannels.Channel>`
"""
_prefix = 'terminal-device-cfg'
_revision = '2015-11-09'
def __init__(self):
super(LogicalChannels, self).__init__()
self._top_entity = None
self.yang_name = "logical-channels"
self.yang_parent_name = "Cisco-IOS-XR-terminal-device-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("channel", ("channel", LogicalChannels.Channel))])
self._leafs = OrderedDict()
self.channel = YList(self)
self._segment_path = lambda: "Cisco-IOS-XR-terminal-device-cfg:logical-channels"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LogicalChannels, [], name, value)
class Channel(Entity):
"""
Logical channel index
.. attribute:: channel_index (key)
Logical Channel Index
**type**\: int
**range:** 0..4294967295
.. attribute:: logical_channel_assignments
Logical channel assignment for logical channel
**type**\: :py:class:`LogicalChannelAssignments <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalChannels.Channel.LogicalChannelAssignments>`
.. attribute:: otn
Otn Related configs for Logical channel
**type**\: :py:class:`Otn <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalChannels.Channel.Otn>`
.. attribute:: trib_protocol
Protocol framing of the tributary signal
**type**\: :py:class:`LogicalTribProtocol <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalTribProtocol>`
.. attribute:: description
Description (Max 255 characters)
**type**\: str
**length:** 1..255
.. attribute:: ingress_client_port
Configure ingress client port for this logical channel
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
.. attribute:: ingress_physical_channel
Configure ingress physical channel for this logical channel
**type**\: int
**range:** 1..4
.. attribute:: admin_state
Configure the admin\-state
**type**\: :py:class:`LogicalAdminState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalAdminState>`
.. attribute:: loopback_mode
Configure the loopback mode
**type**\: :py:class:`LogicalLoopbackMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalLoopbackMode>`
.. attribute:: logical_channel_type
Configure the logical\-channel\-type
**type**\: :py:class:`LogicalProtocol <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalProtocol>`
.. attribute:: rate_class
Rounded bit rate of the tributary signal
**type**\: :py:class:`LogicalTribRate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalTribRate>`
"""
_prefix = 'terminal-device-cfg'
_revision = '2015-11-09'
def __init__(self):
super(LogicalChannels.Channel, self).__init__()
self.yang_name = "channel"
self.yang_parent_name = "logical-channels"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['channel_index']
self._child_classes = OrderedDict([("logical-channel-assignments", ("logical_channel_assignments", LogicalChannels.Channel.LogicalChannelAssignments)), ("otn", ("otn", LogicalChannels.Channel.Otn))])
self._leafs = OrderedDict([
('channel_index', (YLeaf(YType.uint32, 'channel-index'), ['int'])),
('trib_protocol', (YLeaf(YType.enumeration, 'trib-protocol'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg', 'LogicalTribProtocol', '')])),
('description', (YLeaf(YType.str, 'description'), ['str'])),
('ingress_client_port', (YLeaf(YType.str, 'ingress-client-port'), ['str'])),
('ingress_physical_channel', (YLeaf(YType.uint32, 'ingress-physical-channel'), ['int'])),
('admin_state', (YLeaf(YType.enumeration, 'admin-state'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg', 'LogicalAdminState', '')])),
('loopback_mode', (YLeaf(YType.enumeration, 'loopback-mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg', 'LogicalLoopbackMode', '')])),
('logical_channel_type', (YLeaf(YType.enumeration, 'logical-channel-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg', 'LogicalProtocol', '')])),
('rate_class', (YLeaf(YType.enumeration, 'rate-class'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg', 'LogicalTribRate', '')])),
])
self.channel_index = None
self.trib_protocol = None
self.description = None
self.ingress_client_port = None
self.ingress_physical_channel = None
self.admin_state = None
self.loopback_mode = None
self.logical_channel_type = None
self.rate_class = None
self.logical_channel_assignments = LogicalChannels.Channel.LogicalChannelAssignments()
self.logical_channel_assignments.parent = self
self._children_name_map["logical_channel_assignments"] = "logical-channel-assignments"
self.otn = LogicalChannels.Channel.Otn()
self.otn.parent = self
self._children_name_map["otn"] = "otn"
self._segment_path = lambda: "channel" + "[channel-index='" + str(self.channel_index) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-terminal-device-cfg:logical-channels/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LogicalChannels.Channel, ['channel_index', 'trib_protocol', 'description', 'ingress_client_port', 'ingress_physical_channel', 'admin_state', 'loopback_mode', 'logical_channel_type', 'rate_class'], name, value)
class LogicalChannelAssignments(Entity):
"""
Logical channel assignment for logical channel
.. attribute:: logical_channel_assignment
Logical Channel Assignment id
**type**\: list of :py:class:`LogicalChannelAssignment <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalChannels.Channel.LogicalChannelAssignments.LogicalChannelAssignment>`
"""
_prefix = 'terminal-device-cfg'
_revision = '2015-11-09'
def __init__(self):
super(LogicalChannels.Channel.LogicalChannelAssignments, self).__init__()
self.yang_name = "logical-channel-assignments"
self.yang_parent_name = "channel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("logical-channel-assignment", ("logical_channel_assignment", LogicalChannels.Channel.LogicalChannelAssignments.LogicalChannelAssignment))])
self._leafs = OrderedDict()
self.logical_channel_assignment = YList(self)
self._segment_path = lambda: "logical-channel-assignments"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LogicalChannels.Channel.LogicalChannelAssignments, [], name, value)
class LogicalChannelAssignment(Entity):
"""
Logical Channel Assignment id
.. attribute:: assignment_index (key)
Logical channel assignment index
**type**\: int
**range:** 0..4294967295
.. attribute:: description
Configure description for this assignment
**type**\: str
**length:** 1..255
.. attribute:: logical_channel_id
Configure logical channel for this assignment
**type**\: int
**range:** 0..4294967295
.. attribute:: assignment_type
Type of assignment for logical channel
**type**\: :py:class:`LogicalChannelAssignment <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalChannelAssignment>`
.. attribute:: allocation
Configure Allocation for this assignment(10, 40 or 100G)
**type**\: int
**range:** 0..4294967295
.. attribute:: optical_channel_id
Configure optical channel for this assignment
**type**\: str
"""
_prefix = 'terminal-device-cfg'
_revision = '2015-11-09'
def __init__(self):
super(LogicalChannels.Channel.LogicalChannelAssignments.LogicalChannelAssignment, self).__init__()
self.yang_name = "logical-channel-assignment"
self.yang_parent_name = "logical-channel-assignments"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['assignment_index']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('assignment_index', (YLeaf(YType.uint32, 'assignment-index'), ['int'])),
('description', (YLeaf(YType.str, 'description'), ['str'])),
('logical_channel_id', (YLeaf(YType.uint32, 'logical-channel-id'), ['int'])),
('assignment_type', (YLeaf(YType.enumeration, 'assignment-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg', 'LogicalChannelAssignment', '')])),
('allocation', (YLeaf(YType.uint32, 'allocation'), ['int'])),
('optical_channel_id', (YLeaf(YType.str, 'optical-channel-id'), ['str'])),
])
self.assignment_index = None
self.description = None
self.logical_channel_id = None
self.assignment_type = None
self.allocation = None
self.optical_channel_id = None
self._segment_path = lambda: "logical-channel-assignment" + "[assignment-index='" + str(self.assignment_index) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LogicalChannels.Channel.LogicalChannelAssignments.LogicalChannelAssignment, ['assignment_index', 'description', 'logical_channel_id', 'assignment_type', 'allocation', 'optical_channel_id'], name, value)
class Otn(Entity):
"""
Otn Related configs for Logical channel
.. attribute:: tti_msg_auto
Trail trace identifier (TTI) transmit message automatically created. If True, then setting a custom transmit message would be invalid. Trail trace identifier (TTI) transmit message automatically created
**type**\: :py:class:`LogicalChannelOtnTtiAuto <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.LogicalChannelOtnTtiAuto>`
.. attribute:: tti_msg_expected
Trail trace identifier (TTI) message expectedTrail trace identifier (TTI) message expected
**type**\: str
**length:** 1..255
.. attribute:: tti_msg_transmit
Trail trace identifier (TTI) message transmittedTrail trace identifier (TTI) message transmitted
**type**\: str
**length:** 1..255
"""
_prefix = 'terminal-device-cfg'
_revision = '2015-11-09'
def __init__(self):
super(LogicalChannels.Channel.Otn, self).__init__()
self.yang_name = "otn"
self.yang_parent_name = "channel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('tti_msg_auto', (YLeaf(YType.enumeration, 'tti-msg-auto'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg', 'LogicalChannelOtnTtiAuto', '')])),
('tti_msg_expected', (YLeaf(YType.str, 'tti-msg-expected'), ['str'])),
('tti_msg_transmit', (YLeaf(YType.str, 'tti-msg-transmit'), ['str'])),
])
self.tti_msg_auto = None
self.tti_msg_expected = None
self.tti_msg_transmit = None
self._segment_path = lambda: "otn"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LogicalChannels.Channel.Otn, ['tti_msg_auto', 'tti_msg_expected', 'tti_msg_transmit'], name, value)
def clone_ptr(self):
self._top_entity = LogicalChannels()
return self._top_entity
class OpticalChannels(Entity):
"""
optical channels
.. attribute:: optical_channel
Optical Channel index
**type**\: list of :py:class:`OpticalChannel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_terminal_device_cfg.OpticalChannels.OpticalChannel>`
"""
_prefix = 'terminal-device-cfg'
_revision = '2015-11-09'
def __init__(self):
super(OpticalChannels, self).__init__()
self._top_entity = None
self.yang_name = "optical-channels"
self.yang_parent_name = "Cisco-IOS-XR-terminal-device-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("optical-channel", ("optical_channel", OpticalChannels.OpticalChannel))])
self._leafs = OrderedDict()
self.optical_channel = YList(self)
self._segment_path = lambda: "Cisco-IOS-XR-terminal-device-cfg:optical-channels"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(OpticalChannels, [], name, value)
class OpticalChannel(Entity):
"""
Optical Channel index
.. attribute:: ifname (key)
Optical Channel Name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: operational_mode
Configure operational mode
**type**\: int
**range:** 1..100000
.. attribute:: line_port
Specify R/S/I/P
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
"""
_prefix = 'terminal-device-cfg'
_revision = '2015-11-09'
def __init__(self):
super(OpticalChannels.OpticalChannel, self).__init__()
self.yang_name = "optical-channel"
self.yang_parent_name = "optical-channels"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['ifname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ifname', (YLeaf(YType.str, 'ifname'), ['str'])),
('operational_mode', (YLeaf(YType.uint32, 'operational-mode'), ['int'])),
('line_port', (YLeaf(YType.str, 'line-port'), ['str'])),
])
self.ifname = None
self.operational_mode = None
self.line_port = None
self._segment_path = lambda: "optical-channel" + "[ifname='" + str(self.ifname) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-terminal-device-cfg:optical-channels/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(OpticalChannels.OpticalChannel, ['ifname', 'operational_mode', 'line_port'], name, value)
def clone_ptr(self):
self._top_entity = OpticalChannels()
return self._top_entity
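# Illustrative usage sketch (not part of the generated bindings): pushing an
# OpticalChannels config over NETCONF with ydk's CRUDService. The device
# address, credentials and interface name below are placeholders.
#
# from ydk.services import CRUDService
# from ydk.providers import NetconfServiceProvider
#
# provider = NetconfServiceProvider(address='10.0.0.1', port=830,
#                                   username='admin', password='admin')
# channels = OpticalChannels()
# channel = OpticalChannels.OpticalChannel()
# channel.ifname = 'OpticalChannel0_0_0_0'
# channel.operational_mode = 1
# channels.optical_channel.append(channel)
# CRUDService().create(provider, channels)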
3119fae3fe1aadaa71c5cae9f1576b38a7c3afc3 | f68eda51246c95597def569224f3b56d4c3700e7 | /top/api/rest/PromotionLimitdiscountGetRequest.py | 4b9ebc258b2a7047be00899b998a81697c8c960a | [
"MIT",
"BSD-3-Clause"
] | permissive | stoensin/taobao-openapi | 47de8fb29ae2d8ce47d4fce07c0ccaeaee1ef91f | 202a9df2085229838541713bd24433a90d07c7fc | refs/heads/main | 2023-07-17T02:17:51.527455 | 2021-08-25T15:08:49 | 2021-08-25T15:08:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | '''
Created by auto_sdk on 2018.11.10
'''
from top.api.base import RestApi
class PromotionLimitdiscountGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.end_time = None
self.limit_discount_id = None
self.page_number = None
self.start_time = None
self.status = None
def getapiname(self):
return 'taobao.promotion.limitdiscount.get'
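# Illustrative call sketch (not part of the generated class): appkey, secret
# and session key are placeholders, and set_app_info/getResponse come from
# the RestApi base class in this SDK.
#
# import top
# req = PromotionLimitdiscountGetRequest()
# req.set_app_info(top.appinfo('your-appkey', 'your-secret'))
# req.page_number = 1
# resp = req.getResponse('your-session-key')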
c61c33ed11cd6124e71d682034e6e67551e279fc | 7d84000f2977def7118b4c93a47b9d71c4ee38f8 | /app/src/dbi.py | f5871ef0d1e50dfc9ad71010786307b81ee1d8cb | [] | no_license | tensorci/core | d405d17099987163dfc589711345ce414ace406e | 50d18bb43f73b1d5d47fefad543c2554e87a6520 | refs/heads/master | 2021-03-19T13:27:26.219591 | 2020-12-03T01:14:57 | 2020-12-03T01:14:57 | 110,917,313 | 0 | 0 | null | 2020-12-03T01:15:26 | 2017-11-16T03:20:09 | Python | UTF-8 | Python | false | false | 4,899 | py | """
Postgres Database Interface providing the following helper methods:
find_one
find_all
update
  create
  upsert
destroy
undestroy
delete
* Destroy-ing is the same as "soft" deleting a record...it will simply set the is_destroyed column to True
for a record. The helper methods used for querying the DB are automatically scoped to include is_destroyed=False
for a given query. One can simply pass in unscoped=True to these query helper methods to find ALL records for a model,
regardless of is_destroyed status. NOTE: If a table does NOT have an is_destroyed column on it, calling destroy
is the same as calling delete, and the record will be completely removed from the database.
Usage Examples:
user = dbi.create(User, {'email': '[email protected]'})
dbi.update(user, {'email': '[email protected]'})
dbi.destroy(user)
"""
from src import db
# Column used for soft-deleting models
IS_DESTROYED = 'is_destroyed'
def find_one(model, params={}, unscoped=False):
"""
Find the first record of a database model per specified query params
:param model: (required) model class to query (check models.py)
:param params: (optional) dict of params to query model with
:param unscoped: (optional) whether to gather ALL query results, regardless of model's is_destroyed status
:return: first model instance returned from DB query
"""
if hasattr(model, IS_DESTROYED) and not params.get(IS_DESTROYED) and not unscoped:
params[IS_DESTROYED] = False
return db.session.query(model).filter_by(**params).first()
def find_all(model, params={}, unscoped=False):
"""
Find ALL records of a database model per specified query params
:param model: (required) model class to query (check models.py)
  :param params: (optional) dict of params to query model with; list/tuple values are matched with an IN query
:param unscoped: (optional) whether to gather ALL query results, regardless of model's is_destroyed status
:return: list of model instances
"""
exact_params = {}
list_params = {}
for k, v in params.items():
if type(v).__name__ in ['list', 'tuple']:
list_params[k] = tuple(v)
else:
exact_params[k] = v
if hasattr(model, IS_DESTROYED) and not exact_params.get(IS_DESTROYED) and not unscoped:
exact_params[IS_DESTROYED] = False
query = db.session.query(model).filter_by(**exact_params)
for k, v in list_params.items():
query = query.filter(getattr(model, k).in_(v))
return query.all()
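# Example (hypothetical User model, as in the module docstring): list or
# tuple values expand to an IN query.
# admins = find_all(User, {'email': ['[email protected]', '[email protected]']})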
def update(model_instance, params={}):
"""
Update a model instance with new params
:param model_instance: (required) model instance to update
:param params: (optional) dict of params to update model with
:return: the updated model instance
"""
[setattr(model_instance, k, v) for k, v in params.items()]
db.session.commit()
return model_instance
def create(model, params={}):
"""
Create a model and save a new record for specified model class and params
:param model: (required) model class to create new record for
:param params: (model-dependent) dict of params to create model with
:return: the created model instance
"""
model_instance = model(**params)
db.session.add(model_instance)
db.session.commit()
return model_instance
def upsert(model, params={}, unscoped=False):
"""
  Return the existing record if one matches the given params; otherwise create a new one.
  :param model: (required) model class to upsert new record for
  :param params: (model-dependent) dict of params to upsert model with
  :param unscoped: (optional) whether to also match soft-deleted records when checking for an existing one
  :return: tuple --> (model_instance, is_new)
"""
query_params = {k: v for k, v in params.items()}
model_instance = find_one(model, query_params, unscoped=unscoped)
if model_instance:
return model_instance, False
return create(model, params), True
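# Example (hypothetical User model, as in the module docstring):
# user, is_new = upsert(User, {'email': '[email protected]'})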
def destroy(model_instance):
"""
"Soft" delete a model instance (if allowed); otherwise, hard delete it.
:param model_instance: (required) model instance to soft delete
:return: (boolean) whether the model instance was successfully soft deleted
"""
# If model is not soft-deletable, hard delete it.
if not hasattr(model_instance, IS_DESTROYED):
return delete(model_instance)
model_instance.is_destroyed = True
db.session.commit()
return True
def undestroy(model_instance):
"""
Undestroy a model instance
:param model: (required) model instance to undestroy
:return: (boolean) whether the model instance was successfully undestroyed
"""
if not hasattr(model_instance, IS_DESTROYED):
return False
model_instance.is_destroyed = False
db.session.commit()
return True
def delete(model_instance):
"""
Hard delete a model instance
:param model_instance: (required) model instance to hard delete
:return: (boolean) whether the model instance was successfully hard deleted
"""
db.session.delete(model_instance)
db.session.commit()
  return True
17539ecb89461a97e039d325bef834b78d08259b | f415dd840e150a0ada86bc8b7c54f8d1c301e314 | /tests/helpers.py | 694db0ecd5e61ceb7f8490a25316267d22ec46a9 | [
"WTFPL"
] | permissive | Feuermurmel/venv_cli | 5c3680150f8c54fbbb4e5c36b3d609695b1b1104 | 87b5185d11ab4d6f66b8dd76533ab405f820ad97 | refs/heads/master | 2021-01-10T17:19:09.017138 | 2016-02-25T22:09:52 | 2016-02-25T22:09:52 | 51,231,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,678 | py | import os, subprocess, sys, contextlib, pkgutil, tempfile, pytest
class RunResult:
def __init__(self, returncode : int, stdout : str, stderr : str):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
class Workspace:
"""
Allows executing commands and checking conditions in a temporary directory.
"""
def __init__(self, dir):
self.cwd = os.path.join(dir, 'cwd')
self.home = os.path.join(dir, 'home')
os.mkdir(self.cwd)
os.mkdir(self.home)
def _run_commands(self, lines):
environ = dict(os.environ)
environ['HOME'] = os.path.abspath(self.home)
process = subprocess.Popen(
['bash'],
cwd = self.cwd,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = environ)
input = ''.join(i + '\n' for i in lines).encode()
out, err = process.communicate(input)
sys.stdout.buffer.write(out)
sys.stderr.buffer.write(err)
# We expect all output to be valid UTF-8, mainly because all output should be ASCII.
return RunResult(process.returncode, out.decode(), err.decode())
def run(self, *lines, expect_error = False, expect_stdout_contains = '', expect_stderr_contains = ''):
"""
Runs the specified commands by piping them into a non-interactive bash process.
"""
def iter_lines():
yield 'set -e'
for i in lines:
yield i
# Enable errexit whenever a new shell session might have been started.
if i.split()[0] == 'venv':
yield 'set -e'
result = self._run_commands(list(iter_lines()))
if expect_error:
assert result.returncode
else:
assert not result.returncode
assert expect_stdout_contains in result.stdout
assert expect_stderr_contains in result.stderr
return result
def check_venv(self, path = 'venv', *, exists = True):
if exists:
self.run(
'. {}/bin/activate'.format(path),
'[ "$VIRTUAL_ENV" ]')
else:
			self.run(
				'! [ -e {} ]'.format(path))
def create_file(self, path, content : str = ''):
with open(os.path.join(self.cwd, path), 'w', encoding = 'utf-8') as file:
file.write(content)
def create_dir(self, path):
os.makedirs(os.path.join(self.cwd, path), exist_ok = True)
def check_file(self, path, content = None, *, exists = True):
file_path = os.path.join(self.cwd, path)
if exists:
assert os.path.isfile(file_path)
if content is not None:
with open(file_path, 'r', encoding = 'utf-8') as file:
assert file.read() == content
else:
if content is not None:
raise ValueError('content must be None if exists is set to False.')
assert not os.path.exists(file_path)
def check_dir(self, dirs = [], files = [], *, path = '.', exclude_hidden = True):
"""
Check that a set of directories exists and that only those directories exist.
"""
found_dirs = set()
found_files = set()
for i in os.listdir(os.path.join(self.cwd, path)):
if not (i.startswith('.') and exclude_hidden):
item_path = os.path.join(self.cwd, path, i)
if os.path.isdir(item_path):
found_dirs.add(i)
elif os.path.isfile(item_path):
found_files.add(i)
if dirs is not None:
assert found_dirs == set(dirs)
if files is not None:
assert found_files == set(files)
@contextlib.contextmanager
def workspace(*, virtualenvs = [], dummy_project = False):
with tempfile.TemporaryDirectory() as temp_dir:
ws = Workspace(temp_dir)
if dummy_project:
for i in 'setup.py', 'venv_cli_dummy.py':
data = pkgutil.get_data(__name__, os.path.join('example_project', i)).decode()
ws.create_file(i, data)
for i in virtualenvs:
ws.run('venv --no-activate {}'.format(i))
yield ws
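# Illustrative test sketch (the test name is made up; it assumes the venv
# CLI under test is available on the PATH inside the workspace):
#
# def test_creates_virtualenv():
#     with workspace() as ws:
#         ws.run('venv --no-activate')
#         ws.check_venv()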
c8360ce86d4bfa906128d511027631bc4d8e4c69 | d13a1adcf9dda6717dcd1957189b1ad948bffebc | /onepk_without_pyc/tutorials/session/SessionTutorial.py | a605f4dac88adda06955bde55abc20c9759b5cdf | [] | no_license | neoyogi/onepk | ca81170457cfb49ae7a79d3cba58552ce6b74a89 | 54bc49eaed14f7832aca45c4f52311a00282d862 | refs/heads/master | 2021-01-01T19:43:22.849196 | 2015-02-17T11:16:02 | 2015-02-17T11:16:02 | 30,422,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,755 | py | #!/usr/bin/env python
#
# Copyright (c) 2010-2013, Cisco Systems, Inc.
#
# THIS SAMPLE CODE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY
# BY CISCO SOLELY FOR THE PURPOSE of PROVIDING PROGRAMMING EXAMPLES.
# CISCO SHALL NOT BE HELD LIABLE FOR ANY USE OF THE SAMPLE CODE IN ANY
# APPLICATION.
#
# Redistribution and use of the sample code, with or without
# modification, are permitted provided that the following conditions
# are met:
# Redistributions of source code must retain the above disclaimer.
#
#
"""
Session Tutorial
This tutorial is intended for application developers who need to specify configuration parameters for
connections made between onePK applications and network elements.
In onePK, a session represents an authenticated channel of communication
between an application and a network element. This tutorial shows
how to configure a session, how to get the properties and statistics of a
session.
@author The onePK Team ([email protected])
"""
import logging
from BaseTutorial import BaseTutorial
from onep.element import NetworkElement
from onep.element import NetworkApplication
from onep.element import SessionConfig
from onep.element import SessionHandle
from onep.element import SessionStatistics
from onep.element import SessionProperty
from onep.element import ConnectionListener
from onep.core.exception import OnepException
from onep.core.util import tlspinning
logger = logging.getLogger('onep:SessionTutorial')
logger.setLevel(logging.INFO)
class SessionTutorial(BaseTutorial):
@classmethod
def createSessionConfig(self, mode):
"""
Creates an instance of SessionConfig with the given transport mode and
sets the reconnect timer to one minute. All other attributes are set to
their default values.
When connecting to a network element, the caller may optionally provide a
SessionConfig that contains the desired configuration for the resulting
session. When creating the SessionConfig, the only required attribute is
the transport mode. TLS is the transport mode used for the end node
hosting model. TIPC (sometimes referred to as LOCAL) may be used in
process and blade hosting models. All other attributes are optional,
and will take on their default values if not explicitly set. To
demonstrate reconnecting to the session, the reconnect timer will be set
to one minute.
@param mode The transport mode used by the connection.
@return a SessionConfig instance.
"""
# START SNIPPET: create_session_config
# Construct a SessionConfig instance with the given transport mode.
config = SessionConfig(mode)
# Set the reconnect timer to one minute.
config.reconnectTimer = 60
# The session attributes below this point are set to their default
# values.
#
# Set the port to connect to on the network element.
# TLS 15002
# TIPC N/A
#
if mode.lower() == "tls":
config.port = config.DEFAULT_PORT
config.transportMode = SessionConfig.SessionTransportMode.TLS
config.ca_certs = tutorial.root_cert_path
config.keyfile = tutorial.client_key_path
config.certfile = tutorial.client_cert_path
else:
# Not required for TIPC.
pass
# Set the event queue size of the session.
config.eventQueueSize = config.DEFAULT_EVENT_QUEUE_SIZE
# Set the event thread pool size of the session.
config.eventThreadPool = config.DEFAULT_THREADPOOL_SIZE
# Set the event drop mode of the session.
config.eventDropMode = config.DEFAULT_EVENT_DROP_MODE
# Set the keepalive attributes of the session.
# Idle time in seconds
config.keepAliveIdleTime = config.DEFAULT_KEEPALIVE_IDLE_TIME
# Interval between keepalives in seconds
config.keepAliveInterval = config.DEFAULT_KEEPALIVE_INTERVAL
# Number of keepalives
config.keepAliveRetryCount = config.DEFAULT_KEEPALIVE_RETRY_COUNT
# END SNIPPET: create_session_config
config.set_tls_pinning(tutorial.tls_pinning_file, PinningHandler(tutorial.tls_pinning_file))
return config
def connectWithConfig(self, applicationName, config):
"""
Initializes the network application. Then, gets the network element and
connects to it with the given session configuration.
@param applicationName The unique name of this application.
@param config Configuration options instance.
@return SessionHandle The handle of the connected session, or
        None if there was an error.
@throws OnepException If there was an error in executing a onePK
call.
"""
# Get the NetworkApplication instance.
networkApplication = NetworkApplication.get_instance()
# Set the name of the application to applicationName.
networkApplication.name = applicationName
# Get the network element's address or hostname passed in from the
# command line or the properties file.
#
element_hostname = tutorial.get_element_hostname()
# Get the NetworkElement instance at the given hostname.
networkElement = networkApplication.get_network_element(element_hostname)
logger.info("Got a NetworkElement - %s ", networkElement)
# Set the network element for this tutorial.
tutorial.set_network_element(networkElement)
# Connect to the element using the given session configuration. If no
        # configuration is specified by the caller (i.e. None is used), the
# session will take on the default values.
#
# START SNIPPET: onep_element_connect
handle = networkElement.connect(tutorial.get_username(), tutorial.get_password(), config)
# END SNIPPET: onep_element_connect
logger.info("Successfully connected to NetworkElement - %s", tutorial.get_network_element())
return handle
    def printSessionProperties(self, config, handle):
"""
Prints the session properties to the logger.
@param handle The handle to the session to print the properties
for.
"""
# START SNIPPET: print_session_properties
# Get the property instance for this session using the
# session handle.
#
property = handle.sessionProp
# Get the port number the session is connected on.
logger.info("Port: " + str(property.port))
# Get the event queue size of the session.
logger.info("EventQueueSize: " + str(property.eventQueueSize))
# Get the event thread pool size of the session.
logger.info("EventThreadPool: " + str(property.eventThreadPool))
# Get the event drop mode of the session.
logger.info("EventDropMode: " + str(property.eventDropMode))
# Get the reconnect timer of the session in seconds.
logger.info("ReconnectTimer: " + str(property.reconnectTimer))
# Get the transport mode of the session.
logger.info("TransportMode: " + str(property.transportMode))
# END SNIPPET: print_session_properties
def printSessionStatistics(self, handle):
# START SNIPPET: print_session_statistics
# Get the statistics instance for this session using the
# session handle.
#
statistics = handle.sessionStat
# Get the count of events received and dropped.
logger.info("Events Total: %s", statistics.eventTotalCount)
logger.info("\nEvents Dropped: %s", statistics.eventDropCount)
# END SNIPPET: print_session_statistics
def simulate_disconnect(self):
'''
Internal function for testing tutorial reconnect
'''
self.get_network_element()._reconnect_waiting = True
self.get_network_element().disconnect()
class PinningHandler(tlspinning.TLSUnverifiedElementHandler):
def __init__(self, pinning_file):
self.pinning_file = pinning_file
def handle_verify(self, host,hashtype, finger_print, changed):
"""
Callback to the app to determine whether to add a host to pinning DB
Upon receipt of a certificate which fails to match based on server-name or
IP address, and for which there is no match in the pinning database,
this callback asks the application whether to accept the
connection and/or whether to add the server to the pinning database.
By default, the connection will be terminated and the pinning db will
remain unchanged.
@param host: String containing either the FQDN or
a text version of the IP address
@param hashtype: If there was a host name with a non-matching
certificate, this will be the hash-type from that entry.
If there was no entry, this will be created as "SHA-1".
@param finger_print: Fingerprint text created from the certificate.
This will be a series of hex bytes separated by
colons of the form "A1:B2:C3:..."
@changed: changed is TRUE if there was an existing entry in the database
but the certificate does not match. FALSE indicates that there was
no entry in the database for this host.
@return: ACCEPT_AND_PIN if onep should both accept
the connection and add the entry to the pinning database.
ACCEPT_ONCE if onep should only accept the connection
but not add the entry to the pinning database.
REJECT if onep should neither accept the connection
nor add the entry to the pinning database.
"""
# START SNIPPET: pin_handler
if changed:
msg = "\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"
msg +="WARNING: THE CERTIFICATE PRESENTED BY REMOTE HOST '%s'\n IS DIFFERENT FROM THE ONE PREVIOUSLY ACCEPTED" %(host)
msg +="\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
else:
msg = "WARNING: Certificate presented by remote host '%s' is not verified."%(host)
msg += "\n\nThe %s fingerprint sent by the remote host(%s) is:\n%s" %(hashtype, host, finger_print)
msg += "\n\nYou MUST verify the certificate on remote host before proceeding! \n"
msg += "\nChoose from following options:"
if self.pinning_file:
prompt = "\nAccept and Pin (p), Accept Once (o), Reject (r) (default) :"
else:
prompt = "\nAccept Once (o), Reject (r) (default) :"
sys.stdout.write(msg)
self.decision = raw_input(prompt)
while True:
if not self.decision or self.decision.lower() == 'r':
return tlspinning.DecisionType.REJECT
elif self.decision.lower() == 'p' and self.pinning_file:
return tlspinning.DecisionType.ACCEPT_AND_PIN
elif self.decision.lower() == 'o':
return tlspinning.DecisionType.ACCEPT_ONCE
else:
self.decision = raw_input(prompt)
# END SNIPPET: pin_handler
class TutorialReconnectListener(ConnectionListener):
'''
Build a listener class to react to application connection
events. The connection listener will be registered directly
to the instantiated NetworkElement class.
In this example we have setup a log to send messages to the
application logger. We have also added a maximum reconnect
retry count and a flag to tell the listener when the application
wants to exit without a reconnect attempt.
'''
# START SNIPPET: reconnect
log = logging.getLogger('onep:SessionTutorial')
log.setLevel(logging.INFO)
retry = 3
app_terminate = False
def handle_event(self, event, data):
self.log.info("\n********* CONNECT LISTENER *******")
self.log.info('Received connection event %s',
event.elem.OnepSessionState.enumval(event.state))
if self.app_terminate:
self.log.info("\n********* TUTORIAL TERMINATED *******")
return
if event.state == event.elem.OnepSessionState.ONEP_STATE_DISCONNECTED:
if not self.retry:
self.log.info("\n********* RECONNECT RETRY MAX FOR %d *******" % data['id'])
event.elem.set_connection_listener(None, None)
return
try:
self.log.info("\n********* RECONNECT SESSION %d *******" % data['id'])
event.elem.reconnect(data['user'], data['pwd'],
data['id'], data['sess'])
except Exception as e:
self.retry -= 1
self.log.info("\n********* RECONNECT FAILED SESSION %d*******" %data['id'])
self.log.info("\n********* %s *******" % str(e))
# END SNIPPET: reconnect
if __name__ == '__main__':
import sys
tutorial = SessionTutorial(sys.argv)
# Parse arguments from command line or properties file.
if not tutorial.parse_command_line():
logger.error("Error in parsing arguments")
sys.exit(1)
# Create a session configuration with transport mode socket.
config = tutorial.createSessionConfig(tutorial.get_transport())
logger.info("\n********* INITIALIZE AND CONNECT *******")
# Connect to the network element using the given configuration.
originalSessionHandle = None
try:
originalSessionHandle = tutorial.connectWithConfig("Session Tutorial", config)
except OnepException as e:
logger.error("Failed to connect to element.", e)
# Upon a successful connection, a session is established and a handle
# is returned in the form of a SessionHandle. When a session is in the
# connected state, its configuration cannot be modified. The session
# handle may be used to query information about the session. Here, we
# use it to get the session's ID, which will be needed when we want to
# reconnect to the session.
#
# START SNIPPET: onep_session_handle_get_id
sessionID = originalSessionHandle._id
# END SNIPPET: onep_session_handle_get_id
logger.info("Connected to network element with session ID: " + str(sessionID))
logger.info("\n********* PRINT SESSION PROPERTIES *******")
tutorial.printSessionProperties(config,originalSessionHandle)
logger.info("\n********* PRINT SESSION STATISTICS *******")
tutorial.printSessionStatistics(originalSessionHandle)
logger.info("\n********* SETUP CONNECT LISTENER *******")
# START SNIPPET: reconnect_setup
con_listener = TutorialReconnectListener()
tutorial.get_network_element().set_connection_listener(con_listener,
{'user': tutorial.get_username(),
'pwd': tutorial.get_password(),
'id': sessionID,
'sess' : config})
# END SNIPPET: reconnect_setup
logger.info("\n********* SIMULATE INTERRUPTION OF CONNECTION *******")
tutorial.simulate_disconnect()
logger.info("\n********* DISCONNECT AND CLEAN UP *******\n\n")
con_listener.app_terminate = True
tutorial.disconnect()
| [
"[email protected]"
] | |
21db26de3198d180a5e39a545b3d434cfcfb9b71 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_117/ch84_2019_06_07_02_09_59_822281.py | c7e3553b0116c2d5fe47dde12a8fa2c6debf32c7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | def inverte_dicionario (dic):
    # Invert a dictionary: map each value to the list of keys that held it.
    dic_invert = {}
    for k, v in dic.items():
        if v not in dic_invert:
            dic_invert[v] = []
        dic_invert[v].append(k)
    return dic_invert | [
"[email protected]"
] | |
3dd56adae1191d1dbd4cb5db6911e9f04756571f | 4e693506b1b69b28ae2bcf0f5eb0d30e71a5e63d | /keras_models_factory/utils.py | 61d28442735ec316350b013d4e5cab50e1268d3f | [
"MIT"
] | permissive | shadiakiki1986/keras-models-factory | 62fabc7e786bc2e7ad85f00bf41abff85df57b35 | ee4f776eea0ec2e20347105d31cf192877f386bd | refs/heads/master | 2021-01-23T16:57:34.653001 | 2017-09-19T09:22:00 | 2017-09-19T09:22:00 | 102,754,603 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | # https://gist.github.com/shadiakiki1986/2c293e364563492c65bffdb6122b4e92
from sklearn.preprocessing import MinMaxScaler # normalize,
min_max_scaler = MinMaxScaler()
# def myNorm3(X): return normalize(X, norm='l2', axis=0)
def myNorm3(X): return min_max_scaler.fit_transform(X)
##########################################
import numpy as np
from matplotlib import pyplot as plt
def myPlot(X, space:int=5):
X_plt = X+space*np.arange(X.shape[1])
N_PLOT=200
plt.plot(X_plt[0:N_PLOT,:])
plt.show()
from sklearn.model_selection import train_test_split
def ae_fit_encode_plot_mse(X_in, autoencoder, encoder, N_epochs, verbose=1, callbacks:list=[]):
# split
X_train, X_test = train_test_split(X_in, train_size=0.8, random_state=8888)
# train autoencoder
autoencoder.fit(
X_train,
X_train,
epochs=N_epochs,
batch_size=256,
shuffle=True,
validation_data=(
X_test,
X_test,
),
verbose = verbose,
callbacks=callbacks
)
  # only visualize when the dimensionality is small enough to plot
if X_in.shape[1]<50:
# print("encoder predict")
X_enc = encoder.predict(X_in)
# print("encoded",X_enc)
# # X_enc_dec = decoder.predict(X_enc)
# # print("enc-dec",X_enc_dec)
# X_rec = autoencoder.predict(X_pca)
# print("recoded",X_rec)
# plot
# from matplotlib import pyplot as plt
myPlot(X_enc)
X_rec = autoencoder.predict(X_in)
#result = mse(X_in, X_rec)
#print("AE mse = ", result)
#return result
return X_rec
#####################
# functions for t1e_pca_ae_nonlinear-2
# copied from https://stats.stackexchange.com/questions/190148/autoencoder-pca-tensorflow?rq=1
def mse(x, x_est):
numerator = np.linalg.norm(x - x_est)
denominator = np.linalg.norm(x)
#print('num/deonm', numerator, denominator, numerator/denominator)
return numerator/denominator
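# Note: despite its name, mse() above returns the *relative* reconstruction
# error ||x - x_est|| / ||x||, not a mean squared error; 0.0 means a perfect
# reconstruction. Illustrative call (the array names are hypothetical):
#   err = mse(X, autoencoder.predict(X))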
from sklearn.linear_model import LinearRegression
def pca_err(X, x_pca):
#from sklearn.decomposition import PCA
#pca = PCA(n_components=2).fit(X)
#x_pca = pca.transform(X)
lr = LinearRegression().fit(x_pca, X)
x_est = lr.predict(x_pca)
result = mse(X, x_est)
print('err pca = ', result)
return result
| [
"[email protected]"
] | |
bda67dea8cdb17417a447b603190fdbc5a7850d8 | 6351221d588668804e2df01936732eede4d96ed0 | /leetcode-cn/Python/232.用栈实现队列.py | 7ae75d5f682f19c9bda3328e8f390ed0abeb0c49 | [] | no_license | LogicJake/code-for-interview | 8e4ec9e24ec661a443ad42aa2496d78a1fbc8a3f | 5990b09866696c2f3e845047c755fa72553dd421 | refs/heads/master | 2021-09-20T20:19:17.118333 | 2021-09-14T13:46:30 | 2021-09-14T13:46:30 | 102,202,212 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | #
# @lc app=leetcode.cn id=232 lang=python3
#
# [232] Implement Queue using Stacks
#
# @lc code=start
class MyQueue:
def __init__(self):
"""
Initialize your data structure here.
"""
self.stack = []
def push(self, x: int) -> None:
"""
Push element x to the back of queue.
"""
        # Drain the stack, place the new element at the bottom, then restore
        # the older elements on top, so the queue's front stays at the top.
        tmp_stack = []
        while self.stack:
            tmp_stack.append(self.stack.pop(-1))
        self.stack.append(x)
        while tmp_stack:
            self.stack.append(tmp_stack.pop(-1))
def pop(self) -> int:
"""
Removes the element from in front of queue and returns that element.
"""
return self.stack.pop(-1)
def peek(self) -> int:
"""
Get the front element.
"""
return self.stack[-1]
def empty(self) -> bool:
"""
Returns whether the queue is empty.
"""
return not self.stack
# Your MyQueue object will be instantiated and called as such:
# obj = MyQueue()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.peek()
# param_4 = obj.empty()
# @lc code=end
| [
"[email protected]"
] | |
cb0d026ba9bbf7fb071cfc018eaf8538a0285a2d | 9a343c495459e79dc408a102730bcaeac7fa8886 | /blog/app01/admin.py | d1211d0bccc7fd7e1a91cb51ce105a8e53f5ca8c | [
"MIT"
] | permissive | MMingLeung/Python_Study | 62d3ae92bf6760de0804aa5792f53fb3799486a2 | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | refs/heads/master | 2022-12-27T12:53:05.186800 | 2018-03-07T04:34:36 | 2018-03-07T04:34:36 | 92,124,981 | 3 | 1 | MIT | 2021-06-10T18:35:33 | 2017-05-23T03:28:52 | JavaScript | UTF-8 | Python | false | false | 452 | py | from django.contrib import admin
from app01 import models
# Register your models here.
admin.site.register(models.UserInfo)
admin.site.register(models.Article)
admin.site.register(models.ArticleDetail)
admin.site.register(models.Article2Tag)
admin.site.register(models.Tag)
admin.site.register(models.Category)
admin.site.register(models.UserFans)
admin.site.register(models.Blog)
admin.site.register(models.UpDown)
admin.site.register(models.Comment)
| [
"[email protected]"
] | |
00149d0616ecf21778b8fc9f4226f2e31c0455cf | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/container/node_pools/delete.py | 85c05b6a7ef85f44a9e6eb9c9c58a6ee068f7c38 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 4,208 | py | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete node pool command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.container import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.container import flags
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
DETAILED_HELP = {
'DESCRIPTION':
"""\
*{command}* deletes a node pool from a Google Kubernetes Engine (GKE)
cluster. When you delete a node pool, GKE drains all the nodes in the
node pool. The draining process involves GKE evicting Pods on each node
in the node pool. Each node in a node pool is drained by evicting Pods
with an allotted graceful termination period of `MAX_POD`. `MAX_POD` is
the maximum `terminationGracePeriodSeconds` set on the Pods scheduled to
the node with a cap of one hour.
""",
'EXAMPLES':
"""\
To delete the "node-pool-1" node pool from the cluster
"sample-cluster", run:
$ {command} node-pool-1 --cluster=sample-cluster
""",
}
class Delete(base.DeleteCommand):
"""Delete an existing node pool in a running cluster."""
@staticmethod
def Args(parser):
"""Register flags for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.
"""
# TODO(b/28639250): Support remote completion when the SDK supports it.
flags.AddNodePoolNameArg(parser, 'The name of the node pool to delete.')
parser.add_argument(
'--timeout',
type=int,
default=1800,
hidden=True,
        help='Duration (in seconds) to wait for the deletion operation to complete.')
flags.AddAsyncFlag(parser)
flags.AddNodePoolClusterFlag(
parser, 'The cluster from which to delete the node pool.')
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
adapter = self.context['api_adapter']
location_get = self.context['location_get']
location = location_get(args)
pool_ref = adapter.ParseNodePool(args.name, location)
console_io.PromptContinue(
message=('The following node pool will be deleted.\n'
'[{name}] in cluster [{clusterId}] in [{zone}]').format(
name=pool_ref.nodePoolId,
clusterId=pool_ref.clusterId,
zone=adapter.Zone(pool_ref)),
throw_if_unattended=True,
cancel_on_no=True)
try:
# Make sure it exists (will raise appropriate error if not)
adapter.GetNodePool(pool_ref)
op_ref = adapter.DeleteNodePool(pool_ref)
if args.async_:
op = adapter.GetOperation(op_ref)
if not args.IsSpecified('format'):
args.format = util.OPERATIONS_FORMAT
return op
adapter.WaitForOperation(
op_ref,
'Deleting node pool {0}'.format(pool_ref.nodePoolId),
timeout_s=args.timeout)
except apitools_exceptions.HttpError as error:
raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
log.DeletedResource(pool_ref)
return op_ref
Delete.detailed_help = DETAILED_HELP
| [
"[email protected]"
] | |
1b255b3ddd1df3b1b17cabceab2a798b41728384 | 164e0f43ef3ad4cb7f6b28dfdd2bfbaa66d38ce2 | /Remove_Invalid_Parentheses/Remove_Invalid_Parentheses.py | 034e14e6cc19233f7b8b6abc301cc84c82bcdc96 | [] | no_license | maoxx241/code | b217f2d10065d90f52cfa38788c99e238565b892 | 16e97ec5ee7ae9ffa69da2e001d15a86d73d2040 | refs/heads/master | 2021-07-11T14:25:35.098241 | 2020-11-25T14:01:56 | 2020-11-25T14:01:56 | 222,544,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,153 | py | class Solution:
def removeInvalidParentheses(self, s: str) -> List[str]:
left = 0
right = 0
# First, we find out the number of misplaced left and right parentheses.
for char in s:
# Simply record the left one.
if char == '(':
left += 1
elif char == ')':
# If we don't have a matching left, then this is a misplaced right, record it.
right = right + 1 if left == 0 else right
# Decrement count of left parentheses because we have found a right
# which CAN be a matching one for a left.
left = left - 1 if left > 0 else left
result = {}
def recurse(s, index, left_count, right_count, left_rem, right_rem, expr):
# If we reached the end of the string, just check if the resulting expression is
# valid or not and also if we have removed the total number of left and right
# parentheses that we should have removed.
if index == len(s):
if left_rem == 0 and right_rem == 0:
ans = "".join(expr)
result[ans] = 1
else:
# The discard case. Note that here we have our pruning condition.
# We don't recurse if the remaining count for that parenthesis is == 0.
if (s[index] == '(' and left_rem > 0) or (s[index] == ')' and right_rem > 0):
recurse(s, index + 1,
left_count,
right_count,
left_rem - (s[index] == '('),
right_rem - (s[index] == ')'), expr)
expr.append(s[index])
# Simply recurse one step further if the current character is not a parenthesis.
if s[index] != '(' and s[index] != ')':
recurse(s, index + 1,
left_count,
right_count,
left_rem,
right_rem, expr)
elif s[index] == '(':
# Consider an opening bracket.
recurse(s, index + 1,
left_count + 1,
right_count,
left_rem,
right_rem, expr)
elif s[index] == ')' and left_count > right_count:
# Consider a closing bracket.
recurse(s, index + 1,
left_count,
right_count + 1,
left_rem,
right_rem, expr)
# Pop for backtracking.
expr.pop()
# Now, the left and right variables tell us the number of misplaced left and
# right parentheses and that greatly helps pruning the recursion.
recurse(s, 0, 0, 0, left, right, [])
return list(result.keys()) | [
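# Illustrative driver (not part of the LeetCode judge harness):
#   Solution().removeInvalidParentheses("()())()")
#   -> ["(())()", "()()()"] (order may vary); each result needs one removal.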
"[email protected]"
] | |
69e237230ee8790bc12d09eeeae22d58d793a7de | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/binary_20200525114701.py | e8cd6e394a55474ea53bc0c4231c7e1d52b17737 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,217 | py | def solution(N):
    # Binary gap: return the length of the longest run of zeros that is
    # surrounded by ones in the binary representation of N.
    binary = format(N, 'b')
    max_gap = 0
    current_gap = 0
    counting = False  # only count zeros after the first '1' has been seen
    for digit in binary:
        if digit == '1':
            if current_gap > max_gap:
                max_gap = current_gap
            counting = True
            current_gap = 0
        elif counting:
            current_gap += 1
    return max_gap

print(solution(10001))  # binary 10011100010001 -> longest gap is 3 | [
"[email protected]"
] | |
80c7f0807075a35cdcec4e616e655da777916a79 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/leetcode/leetCode/Stack/225_ImplementStackusingQueues.py | 605f337ca4287019202fd4d6bbc9e42ac90b852a | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 768 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: [email protected]
from collections import deque
class Stack(object):
def __init__(self):
self._queue = deque()
def push(self, x):
# Pushing to back and
# then rotating the queue until the new element is at the front
q = self._queue
q.append(x)
for i in xrange(len(q) - 1):
q.append(q.popleft())
def pop(self):
self._queue.popleft()
def top(self):
return self._queue[0]
def empty(self):
return not len(self._queue)
"""Test
if __name__ == '__main__':
s = Stack()
s.push(1)
s.push(2)
print s.top()
s.pop()
print s.empty()
print s.top()
s.pop()
print s.empty()
"""
| [
"[email protected]"
] | |
fc78dfb58fc28e5c173a243deee5202c75eb4f57 | 5c4a6130f7b09a652f4cd8feb2132b174cf00f25 | /test-mab_3.py | 108f2d602597592c73b6cf43d6b145e779ff5757 | [] | no_license | xiaogaogaoxiao/Machine-Learning-Algorithm-for-Vehicular-Communication-Networks | 3ebbb023e6b25dafbbd2cdf4f412c83c0c6ac0b6 | c9b8b0f5f4f57ae6506c80956e48bad3ac9457ad | refs/heads/main | 2023-06-02T19:58:17.197528 | 2021-06-21T00:33:44 | 2021-06-21T00:33:44 | 445,976,561 | 1 | 0 | null | 2022-01-09T02:38:07 | 2022-01-09T02:38:07 | null | UTF-8 | Python | false | false | 24,725 | py | '''
This example shows how a base station chooses a beam to serve an approaching
vehicle. The base station has two beam configurations (a long reaching but
narrow one, and a short reaching but wide one). We assume that the base
station can only activate one beam at a time, with either configuration.
The example uses multi-armed bandit (MAB) to select the best beam to serve.
'''
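## A minimal standalone sketch of an epsilon-greedy bandit rule in the spirit
## of the scenario below (illustrative only -- `pick_arm` is not part of the
## simulator API; the scenario itself explores during an initial time window
## rather than per pull). Each beam is an "arm", and the reward for pulling
## an arm is the service duration it achieved.
import random

def pick_arm(arms, epsilon=0.1):
    '''arms: list of (beam, average_reward) tuples; returns the chosen beam.'''
    if random.random() < epsilon:
        return random.choice(arms)[0]        # explore: pick a random arm
    return max(arms, key=lambda a: a[1])[0]  # exploit: best average reward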
import wx
import operator
import argparse
import random
from argparse import Namespace, ArgumentParser
from sim.simulation import World
from sim.loc import XY
from sim.scenario import BaseScenario
from sim.event import Event
from node.node import BaseNode
from node.mobility import Stationary, StaticPath
from comm.transceiver import Transceiver
from comm.channel import DiscModel, SectorModel
from comm.signalwave import QualityBasedSignal
####################################################################
## Helper
####################################################################
class DebugPrint:
def print(self, *args, **kw):
print(*args, **kw) # comment this line out to disable debug printing
pass
####################################################################
## Communication Module
####################################################################
class CommModule:
def __init__(self, node):
self._node = node
## send hello message, and get replied, record channel quality indicator (cqi)
## return a tuple: (outcome, cqi)
def send_hello_to(self, other):
cqi = 0
me = self._node
# send hello-message
hello_message = QualityBasedSignal(me)
if me.get("transceiver").unicast(hello_message, other)==None:
return (False, cqi) # signal can't reach other? return False
# receiver replies with hello-reply
hello_reply = QualityBasedSignal(me)
if other.get("transceiver").unicast(hello_reply, me)==None:
return (False, cqi) # reply can't reach me? return False
# hello-reply can reach me, now check the signal quality
recv_signal = me.get("transceiver").received_signal(other,hello_reply)
if not me.get("transceiver").can_detect(recv_signal):
return (False, cqi) # can not detect? return False
# return cqi
cqi = recv_signal.quality
return (True, cqi)
####################################################################
## Nodes
####################################################################
class MyBS(BaseNode):
'''
MyBS: This is a base station in the VANET sim world. In our system
a BS is a radio head, which is just a beam.
'''
def __init__(self, simworld, id, loc, channel):
super().__init__(simworld, id, node_type=BaseNode.Type.BS)
## beam configuration variables
self.set_transceiver(Transceiver(self,channel))
self.channel_property = channel.get_property()
self.comm = CommModule(self)
self.set_mobility(Stationary(loc))
## beam runtime variables for service status
self.service_node = None
self.service_duration = 0
## MAB related variables
self.total_reward = 0
self.total_trial = 0
def associate_vehicle(self,node,time):
self.service_node = node
self.service_node.associated_bs = self
self.service_duration = 0
def lost_vehicle(self,time):
self.service_node.associated_bs = None
self.service_node = None
## Multi-Armed Bandit: update reward after pulling this arm (i.e. this beam)
def MAB_update_reward(self, reward):
self.total_reward += reward
self.total_trial += 1
## Multi-Armed Bandit: calculate expected reward for this arm (i.e. this beam)
def MAB_get_average_reward(self):
if self.total_trial==0:
return 0
return self.total_reward/self.total_trial
## show the coverage of this BS
def show_coverage(self):
self.clear_drawing()
if self.channel_property["model"]=="DiscModel":
if self.service_node!=None:
self.draw_circle(self.channel_property["radius"])
else:
pen = wx.Pen(wx.RED,2,style=wx.PENSTYLE_LONG_DASH)
if self.service_node!=None:
brush = wx.Brush(wx.RED,style=wx.BRUSHSTYLE_BDIAGONAL_HATCH)
else:
brush = wx.Brush(wx.RED,style=wx.TRANSPARENT)
self.draw_sector(self.channel_property["radius"],
self.channel_property["azimuth"],
self.channel_property["beam width"],
pen, brush)
# def show_coverage(self):
# self.clear_drawing()
# if self.channel_property["model"]=="DiscModel":
# if self.service_node!=None:
# self.draw_circle(self.channel_property["radius"])
# elif self.channel_property["model"]=="SectorModel":
# if self.channel_property["beam width"]==60:
# pen = wx.Pen(wx.RED,2,style=wx.PENSTYLE_LONG_DASH)
# else:
# pen = wx.Pen(wx.BLACK,4,style=wx.PENSTYLE_SHORT_DASH)
# if self.service_node!=None:
# brush = wx.Brush(wx.RED,style=wx.BRUSHSTYLE_BDIAGONAL_HATCH)
# else:
# brush = wx.Brush(wx.RED,style=wx.TRANSPARENT)
# self.draw_sector(self.channel_property["radius"],
# self.channel_property["azimuth"],
# self.channel_property["beam width"],
# pen, brush)
class MyVehicle(BaseNode):
'''
MyVehicle: This is a transmitting node in the VANET sim world
'''
def __init__(self, simworld, id, channel):
super().__init__(simworld, id, node_type=BaseNode.Type.Vehicle)
## vehicle configuration variables
self.set_transceiver(Transceiver(self,channel))
self.comm = CommModule(self)
## vehicle runtime variables for service status
self.associated_bs = None
def associate_bs(self,bs,time):
bs.associate_vehicle(self,time)
def lost_bs(self,time):
self.associated_bs.lost_vehicle(time)
## draw a line to the associated BS, if any
def show_connection(self):
self.clear_drawing()
if self.associated_bs!=None:
self.draw_line(self.associated_bs,pen = wx.Pen(wx.BLUE,2,style=wx.PENSTYLE_SOLID))
self.set_color(wx.BLUE)
else:
self.set_color(wx.RED)
####################################################################
## Scenario
####################################################################
class MyScenario(BaseScenario,DebugPrint):
'''
MyScenario: This is my scenario
'''
## ------------------------------------------------------------
## This method will be called before the start of the simulation,
## build the simulation world here
def on_create(self, simworld) -> bool:
## for statistics
self.last_sim_time = 0
## simulation variables
self.simworld = simworld
if self.simworld.is_animation_shown():
bitmap = wx.Bitmap()
if bitmap.LoadFile("croydon.png"):
self.set_background(bitmap,-500,400)
else:
print("Error loading bitmap file, no background is applied.")
self.set_name("Beam selection example")
## define a set of sectors covering 360 degree
class Sectors:
def __init__(self, freq, radius, sector_number, pointing):
self.all_beams = []
beam_width = 360/sector_number
for i in range(0,sector_number):
angle = pointing + i*beam_width
while angle>=360: angle-=360
sector = SectorModel(freq, radius, beam_width, angle)
self.all_beams.append(sector)
## create a channel
freq = 2.4
radius = 120
self.omni = DiscModel(freq, radius)
## create some sector beams and channels
## - type-1: long but narrow beams
## - type-2: short but wide beams
## - `pointing` is the pointing direction of the first beam in the sector
sector1 = Sectors(freq, radius=120,sector_number=6,pointing=0)
# sector2 = Sectors(freq, radius=80,sector_number=3,pointing=90)
## create a base station on the map
self.bs = []
bs_locs = [XY(200,0)] # locations
for i in range(0,len(bs_locs)): # For each BS, do the following
j = 0
for beam in sector1.all_beams: # add type 1 narrow beams
this_id = "BS%d.%dN"%(i,j); j+=1
this_node = MyBS(simworld, this_id, bs_locs[i], channel=beam)
self.bs.append(this_node)
j = 0
# for beam in sector2.all_beams: # add type 2 wide beams
# this_id = "BS%d.%dW"%(i,j); j+=1
# this_node = MyBS(simworld, this_id, bs_locs[i], channel=beam)
# self.bs.append(this_node)
## create the vehicles on a site
self.vehicles = []
self.vehicle_start_info = {}
self.vehicle_start_info["car1"] = [XY(200,140)]
self.vehicle_start_info["car1_faster"] = [XY(200,140)]
self.vehicle_start_info["car2_slower"] = [XY(200,140)]
self.vehicle_start_info["car2"] = [XY(200,140)]
self.vehicle_start_info["car2_faster"] = [XY(200,140)]
self.vehicle_start_info["car3"] = [XY(180, 190)]
self.vehicle_start_info["car3_faster"] = [XY(180, 190)]
self.vehicle_start_info["car4"] = [XY(180, 190)]
self.vehicle_start_info["car4_faster"] = [XY(180, 190)]
self.vehicle_start_info["car4_inverse"] = [XY(360, 80)]
self.vehicle_start_info["car4_inverse_faster"] = [XY(360, 80)]
self.vehicle_start_info["car5"] = [XY(200, 130)]
self.vehicle_start_info["car5_faster"] = [XY(200, 130)]
self.vehicle_start_info["car5_inverse"] = [XY(60, 15)]
self.vehicle_start_info["car5_inverse_faster"] = [XY(60, 15)]
self.vehicle_start_info["car6"] = [XY(200, 130)]
self.vehicle_start_info["car6_faster"] = [XY(200, 130)]
self.vehicle_start_info["car6_inverse"] = [XY(265, -180)]
self.vehicle_start_info["car6_inverse_faster"] = [XY(265, -180)]
self.vehicle_path_info = {}
self.vehicle_path_info["car1"] = [(random.uniform(20, 30), XY(230,90)),
(random.uniform(20, 30), XY(255,0)),
(random.uniform(20, 30), XY(240,-45)),
(random.uniform(20, 30), XY(40,-240)) ]
self.vehicle_path_info["car1_faster"] = [(random.uniform(80, 100), XY(230, 90)),
(random.uniform(80, 100), XY(255, 0)),
(random.uniform(80, 100), XY(240, -45)),
(random.uniform(80, 100), XY(40, -240))]
self.vehicle_path_info["car2_slower"] = [(random.uniform(20, 30), XY(230, 90)),
(random.uniform(20, 30), XY(270, 105)),
(random.uniform(20, 30), XY(440, 180))]
self.vehicle_path_info["car2"] = [(random.uniform(50, 70), XY(230,90)),
(random.uniform(50, 80), XY(270,105)),
(random.uniform(50, 70), XY(440,180)) ]
self.vehicle_path_info["car2_faster"] = [(random.uniform(60, 100), XY(230,90)),
(random.uniform(60, 100), XY(270,105)),
(random.uniform(60, 100), XY(440,180)) ]
self.vehicle_path_info["car3"] = [(random.uniform(50, 80), XY(205, 130)),
(random.uniform(50, 70), XY(100, -60)),
(random.uniform(50, 80), XY(50, -150))]
self.vehicle_path_info["car3_faster"] = [(random.uniform(60, 100), XY(205, 130)),
(random.uniform(60, 100), XY(100, -60)),
(random.uniform(60, 100), XY(50, -150))]
self.vehicle_path_info["car4"] = [(random.uniform(50, 70), XY(230,90)),
(random.uniform(50, 80), XY(255, -20)),
(random.uniform(50, 70), XY(310, 40)),
(random.uniform(50, 80), XY(360, 80))]
self.vehicle_path_info["car4_faster"] = [(random.uniform(60, 100), XY(230,90)),
(random.uniform(60, 100), XY(255, -20)),
(random.uniform(60, 100), XY(310, 40)),
(random.uniform(60, 100), XY(360, 80))]
self.vehicle_path_info["car4_inverse"] = [(random.uniform(50, 70),XY(310, 40) ),
(random.uniform(50, 80), XY(255, -20)),
(random.uniform(50, 70), XY(230, 90)),
(random.uniform(50, 80), XY(180, 190))]
self.vehicle_path_info["car4_inverse_faster"] = [(random.uniform(60, 100),XY(310, 40) ),
(random.uniform(60, 100), XY(255, -20)),
(random.uniform(60, 100), XY(230, 90)),
(random.uniform(60, 100), XY(180, 190))]
self.vehicle_path_info["car5"] = [(random.uniform(50, 70), XY(130, -10)),
(random.uniform(50, 80), XY(75, 10)),
(random.uniform(50, 70), XY(60, 15))]
self.vehicle_path_info["car5_faster"] = [(random.uniform(60, 100), XY(130, -10)),
(random.uniform(60, 100), XY(75, 10)),
(random.uniform(60, 100), XY(60, 15))]
self.vehicle_path_info["car5_inverse"] = [(random.uniform(50, 70), XY(75, 10)),
(random.uniform(50, 80), XY(130, -10)),
(random.uniform(50, 70), XY(200, 130))]
self.vehicle_path_info["car5_inverse_faster"] = [(random.uniform(60, 100), XY(75, 10)),
(random.uniform(60, 100), XY(130, -10)),
(random.uniform(60, 100), XY(200, 130))]
self.vehicle_path_info["car6"] = [(random.uniform(50, 70), XY(235, 65)),
(random.uniform(50, 80), XY(250, -20)),
(random.uniform(50, 70), XY(225, -75)),
(random.uniform(50, 80), XY(250, -130))]
self.vehicle_path_info["car6_faster"] = [(random.uniform(60, 100), XY(235, 65)),
(random.uniform(60, 100), XY(250, -20)),
(random.uniform(60, 100), XY(225, -75)),
(random.uniform(60, 100), XY(250, -130))]
self.vehicle_path_info["car6_inverse"] = [(random.uniform(50, 70), XY(225, -75)),
(random.uniform(50, 80), XY(250, -20)),
(random.uniform(50, 70), XY(235, 65)),
(random.uniform(50, 80), XY(200, 130))]
self.vehicle_path_info["car6_inverse_faster"] = [(random.uniform(60, 100), XY(225, -75)),
(random.uniform(60, 100), XY(250, -20)),
(random.uniform(60, 100), XY(235, 65)),
(random.uniform(60, 100), XY(200, 130))]
for info in self.vehicle_start_info:
self.start_loc = self.vehicle_start_info[info][0]
self.path = self.vehicle_path_info[info]
node = MyVehicle(simworld, id=info, channel=self.omni)
node.set_mobility(StaticPath(start_loc=self.start_loc,path=self.path))
self.vehicles.append(node)
## show all beams
for beam in self.bs:
beam.show_coverage()
return True
## --------------------------------------------------------
## This method will be called repeatedly until the simulation
## is ended or stopped, perform any simulation action here
def on_event(self, sim_time, event_obj):
duration = sim_time - self.last_sim_time
self.last_sim_time = sim_time
if event_obj==Event.MOBILITY_END: # a mobile node has finished its mobility?
self.do_mobility(sim_time,duration,event_obj)
self.do_restart_node(sim_time,event_obj)
elif event_obj==Event.SIM_MOBILITY: # mobility progresses a time step?
self.do_mobility(sim_time,duration,event_obj)
## end of mobility, then create a new vehicle to replace this one
def do_restart_node(self, sim_time, event_obj):
this_node = event_obj.info["node"] # retrieve the node reaching end of mobility
new_node = MyVehicle(self.simworld, id=this_node.id, channel=self.omni)
new_node.set_mobility(StaticPath(start_loc=self.vehicle_start_info[this_node.id][0], path=self.vehicle_path_info[this_node.id]))
self.vehicles.append(new_node) # add new node to our list
self.vehicles.remove(this_node) # remove old node from our list
this_node.remove_from_simulation() # remove old node from the simulation
## Do user simulation here
def do_mobility(self, sim_time, duration, event_obj):
all_vehicles = self.vehicles # get all vehicles from our liist
all_beams = self.bs # get all BSs from our list
connect_time = 0
## collect stats for beams for the last period
for beam in all_beams:
if beam.service_node!=None:
beam.service_duration += duration
## check beam connectivity with its serving vehicle
active_beam_number = 0
for beam in all_beams:
if beam.service_node==None: continue # skip if none
vehicle = beam.service_node
active_beam_number += 1 # found an active beam
(is_successful, cqi) = vehicle.comm.send_hello_to(beam)
if not is_successful: # can't hear from vehicle, i.e. lost connection
## update reward based on service duration
## this is a random reward due to random vehicle speed
beam.MAB_update_reward(beam.service_duration)
# self.print("at t = %1.2f, %s lost connection, duration time is %1.2f, "
# "Beam %s total connection time is %1.2f"
# %(sim_time, beam.service_node.id, beam.service_duration, beam.id, beam.total_reward))
# self.print("%s current total_reward is %1.2f, current total_trail is %s, average reward is %1.2f"
# %(beam.id, beam.total_reward, beam.total_trial, beam.total_reward/beam.total_trial))
beam.lost_vehicle(sim_time)
active_beam_number -= 1 # can't count this beam as active
## find a vehicle to serve if bs is available (i.e. currently no active beam)
## in this example, we limit the service to one vehicle maxmimum
if active_beam_number < 1:
## iterate all beams to find potential vehicles to serve
## each potential service is an `arm`
arm_list = [] # list of available `arms` to pull in multi-armed bandit
for beam in all_beams:
beacon = QualityBasedSignal(beam)
node_list = beam.get("transceiver").broadcast(beacon)
if beam.service_node != None: continue
for node in node_list:
## check that the reachable node is a vehicle
if node.type!=BaseNode.Type.Vehicle: continue # skip if not vehicle
# if node.associated_bs!=None: continue # skip if already being served
## check also it is in the coverage of the beam
(is_successful, cqi) = node.comm.send_hello_to(beam)
if not is_successful: continue # skip if failed, likely not in coverage
## add this option as an `arm` to the `arm_list`
arm = beam
reward_expectation = beam.MAB_get_average_reward()
arm_list.append((arm, reward_expectation, node, cqi))
## for exploration, pick a random arm
## for exploitation, pick the highest expected reward arm
selected_beam = None
if len(arm_list)!=0:
                if sim_time<60: # do exploration in the first 60s
random_number = random.random()
if random_number > 0.1:
(selected_beam,_,vehicle, cqi) = random.choice(arm_list)
reason = "based on exploration (random pull)"
else:
for i in arm_list:
self.print("the %s cqi is: %1.2f" % (i[2].id, i[3]))
(selected_beam,_,vehicle, cqi) = max(arm_list,key=operator.itemgetter(3))
reason = "based on exploration (best cqi)"
else: # do exploitation
(selected_beam,_,vehicle, cqi) = max(arm_list,key=operator.itemgetter(1))
reason = "by choosing the best arm"
n = len(arm_list)
self.print("%s final average reaward is:%1.2f " % (arm_list[0][0].id, arm_list[0][1]))
for i in range(1, n):
last = i - 1
last_id = arm_list[last][0].id
current_id = arm_list[i][0].id
if last_id == current_id: continue
self.print("%s final average reaward is:%1.2f " % (arm_list[i][0].id, arm_list[i][1]))
if selected_beam!=None:
selected_beam.associate_vehicle(vehicle,sim_time)
self.print("at t=%1.2f, %s connected to %s %s"
%(sim_time,vehicle.id,selected_beam.id,reason))
## draw connectivity & beam coverage on the map
for vehicle in all_vehicles:
vehicle.show_connection()
for beam in all_beams:
beam.show_coverage()
####################################################################
## main
####################################################################
if __name__ == "__main__":
## command line parameters
parser: ArgumentParser = argparse.ArgumentParser()
parser.add_argument("--nodisplay", help="Run in no GUI mode", action="store_true")
parser.add_argument("--step", help="Mobility step time (in sec)", type=int, default=0.2)
parser.add_argument("--speed", help="Animation playback speed (x times)", type=float, default=1.0)
parser.add_argument("--duration", help="Simulation duration (in sec), -1 for non-stop", type=int, default=1)
args: Namespace = parser.parse_args()
## welcome info
print("A Simple VANET Environment. Press [^C] to quit")
#args.nodisplay = True # <-- hardcoding no GUI mode
args.step = 0.1 # <-- hardcoding the mobility step time
args.speed = 1.0 # <-- hardcoding the animation speed (times)
args.duration = -1 # <-- hardcoding the sim duration (sec)
if args.nodisplay: print("- simulation will run without animation")
else: print("- animation will playback at x%1.2f speed"%args.speed)
print("- vehicles move a step every %1.2f s in simulation"%args.step)
if args.duration>0: print("- simulation will stop at %1.2f s"%args.duration)
else: print("- simulation will run non-stop")
print("")
## create, setup and run the simulation
## note that to run a simulation, we need to create a 'scenario'
sim = World()
sim.config(sim_stop = args.duration,
sim_step = args.step,
sim_speed = args.speed,
display_option = not args.nodisplay,
scenario = MyScenario(sim))
sim.run()
| [
"[email protected]"
] | |
128cca6fe2e6e5e784f55a61facb0487d837a808 | 127fa3dd454434b4c7526afe161177af2e10226e | /learn/python_base/io.py | 60348faa4adbf54d5d67904473a557cd1779d9e7 | [] | no_license | lunar-r/sword-to-offer-python | 966c46a8ddcff8ce5c95697638c988d83da3beab | fab4c341486e872fb2926d1b6d50499d55e76a4a | refs/heads/master | 2023-04-18T18:57:12.126441 | 2020-11-29T09:51:23 | 2020-11-29T09:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | # -*- coding: utf-8 -*-
"""
File Name: io
Description :
Author : simon
date: 19-3-28
"""
# -- Basic file operations
output = open(r'C:\spam', 'w') # open an output file, for writing
input = open('data', 'r') # open an input file, for reading. The open mode can be 'w', 'r', 'a', 'wb', 'rb', 'ab', etc.
fp.read([size]) # size is the amount to read, in bytes
fp.readline([size]) # read one line; if size is given, only part of the line may be returned
fp.readlines([size]) # put every line of the file into a list as one member, and return that list. Internally it is implemented by calling readline() in a loop. If the size argument is given, size is the total length of content to read.
fp.readable() # whether the file is readable
fp.write(str) # write str to the file; write() does not append a newline after str
fp.writelines(seq) # write all of seq's contents to the file (several lines written in one go)
fp.writable() # whether the file is writable
fp.close() # close the file
fp.flush() # write the buffer contents to disk
fp.fileno() # return a long-integer "file descriptor"
fp.isatty() # whether the file is a terminal device file (on unix systems)
fp.tell() # return the current position of the file marker, with the start of the file as the origin
fp.next() # return the next line and move the file marker to it. Using a file in a statement like `for ... in file` traverses it by calling next(). (Python 2 only; in Python 3 use next(fp).)
fp.seek(offset[, whence]) # move the file marker to position offset. whence 0 means count from the start of the file, 1 means the current position is the origin, 2 means the end of the file is the origin.
fp.seekable() # whether seek is supported
fp.truncate([size]) # truncate the file to the given size; by default, cut at the current position of the file marker.
for line in open('data'):
    print(line) # using a for statement is well suited to reading fairly large files
with open('data') as file:
    print(file.readline()) # using a with statement guarantees the file is closed
with open('data') as file:
    lines = file.readlines() # read all lines of the file at once, then close the file
open('f.txt', encoding='latin-1') # Python3.x Unicode text file
open('f.bin', 'rb') # Python3.x binary bytes file
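# Illustrative seek/tell round trip (an addition, not from the original notes):
with open('data') as fp:
    first_pass = fp.read()
    end_pos = fp.tell()      # the marker now sits at end-of-file
    fp.seek(0)               # jump back to the beginning
    second_pass = fp.read()  # identical to first_pass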
# file objects also have corresponding attributes: buffer closed encoding errors line_buffering name newlines, etc. | [
"[email protected]"
] | |
404b3dd9bf9118947a73b4b22ab44cac0e5361bd | d5e94042ac2b248b7701117a6ea941bcc862067a | /upvote/gae/modules/bit9_api/constants.py | f473708c778b50b28f70a3411e000b6fa473e0de | [
"Apache-2.0"
] | permissive | codegrande/upvote | f373105203a0595f76c29e138a18a95dc24a63df | e05d477bb13e470127b109eb8905a66a06eed5ac | refs/heads/master | 2020-03-07T19:40:47.185833 | 2019-06-20T14:35:20 | 2019-06-20T14:35:20 | 127,677,753 | 0 | 0 | null | 2018-04-01T22:49:28 | 2018-04-01T22:49:27 | null | UTF-8 | Python | false | false | 2,558 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used in communication with Bit9."""
from upvote.shared import constants
# RFC 3339/ISO 8601 datetime format.
DATETIME_CONVERSION_STRING = '%Y-%m-%dT%H:%M:%SZ'
DATETIME_CONVERSION_STRING_USEC = '%Y-%m-%dT%H:%M:%S.%fZ'
OLD_DATETIME_CONVERSION_STRING = '%Y-%m-%d %H:%M:%S'
# A subtype is the classification of the kind of event.
SUBTYPE = constants.Namespace(tuples=[
# A file was blocked because it was unapproved.
('UNAPPROVED', 801),
# A file was blocked because it was banned.
('BANNED', 802),
# A file was blocked because of a user response to a prompt.
('PROMPTED_BLOCKED', 837),
# A file was approved because of a user response to a prompt.
('PROMPTED_APPROVED', 838),
# A file was blocked because of a timeout waiting for user response.
('PROMPTED_TIMED_OUT', 839)])
APPROVAL_STATE = constants.Namespace(
tuples=[('UNAPPROVED', 1), ('APPROVED', 2), ('BANNED', 3)])
APPROVAL_STATE.DefineMap('TO_STR', {
APPROVAL_STATE.UNAPPROVED: 'UNAPPROVED',
APPROVAL_STATE.APPROVED: 'APPROVED',
APPROVAL_STATE.BANNED: 'BANNED'})
SHA256_TYPE = constants.Namespace(tuples=[('REGULAR', 5), ('FUZZY', 6)])
SHA256_TYPE.DefineMap('TO_ID_TYPE', {
SHA256_TYPE.REGULAR: constants.ID_TYPE.SHA256,
SHA256_TYPE.FUZZY: constants.ID_TYPE.FUZZY_SHA256})
class FileFlags(object):
"""File flags for a Bit9 file catalog."""
MARKED_INSTALLER = 0x00004
DETECTED_INSTALLER = 0x00010
MARKED_NOT_INSTALLER = 0x10000
class UpvoteHostHealthProperties(object):
"""Host health properties."""
AGENT_CACHE_SIZE = 'agent_cache_size'
AGENT_VERSION = 'agent_version'
CONNECTED = 'connected'
HAS_HEALTH_CHECK_ERRORS = 'has_health_check_errors'
IS_INITIALIZING = 'is_initializing'
LAST_REGISTER_DATE = 'last_register_date'
NAME = 'name'
POLICY_NAME = 'policy_name'
POLICY_STATUS = 'policy_status'
POLICY_STATUS_DETAILS = 'policy_status_details'
UPGRADE_STATUS = 'upgrade_status'
| [
"[email protected]"
] | |
62dbc06cc71f3f8a7e37df306f12fd1e96d86336 | 284f2bfaabf91899211e56063026857c496965cf | /users/mixins.py | 9da8e98128b9d6e173ab42e6c559d7402fc769a5 | [] | no_license | vanessa/building-tuirer | 7b56bb9791659fcd04942d2c84a393c3c226f8c4 | 61d85df7d120387700b2e449a6fde5fb9ca7cfaa | refs/heads/master | 2022-12-11T07:25:14.174448 | 2018-08-07T05:18:29 | 2018-08-07T05:18:29 | 142,210,249 | 18 | 0 | null | 2022-12-08T02:19:48 | 2018-07-24T20:35:34 | Python | UTF-8 | Python | false | false | 706 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from users.models import User
from django.shortcuts import redirect
from django.contrib import messages
class ProfileAccessMixin(LoginRequiredMixin):
def handle_no_permission(self):
        # Show an error message to the user
        messages.error(
            self.request,
            'You cannot edit a profile that is not yours!'
        )
return redirect('index')
def dispatch(self, request, *args, **kwargs):
user_pk = kwargs.get('pk')
user = User.objects.get(pk=user_pk)
if not user == request.user:
return self.handle_no_permission()
return super().dispatch(request, *args, **kwargs)
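# Illustrative (hypothetical) usage in a class-based view:
#   class ProfileUpdateView(ProfileAccessMixin, UpdateView):
#       model = User
#       fields = ['username', 'email']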
| [
"[email protected]"
] | |
5463c902b0d00d5e90378a570e33e19db4e6b638 | 31a0b0749c30ff37c3a72592387f9d8195de4bd6 | /release/ray_release/scripts/run_release_test.py | 6729c6a6630ae109dcf0bf0513abdb49074b30e0 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | longshotsyndicate/ray | 15100bad514b602a3fa39bfe205288e7bec75d90 | 3341fae573868338b665bcea8a1c4ee86b702751 | refs/heads/master | 2023-01-28T15:16:00.401509 | 2022-02-18T05:35:47 | 2022-02-18T05:35:47 | 163,961,795 | 1 | 1 | Apache-2.0 | 2023-01-14T08:01:02 | 2019-01-03T11:03:35 | Python | UTF-8 | Python | false | false | 3,840 | py | import os
import sys
from typing import Optional
import click
from ray_release.aws import maybe_fetch_api_token
from ray_release.config import (
read_and_validate_release_test_collection,
find_test,
as_smoke_test,
DEFAULT_WHEEL_WAIT_TIMEOUT,
)
from ray_release.exception import ReleaseTestCLIError, ReleaseTestError
from ray_release.glue import run_release_test
from ray_release.logger import logger
from ray_release.reporter.legacy_rds import LegacyRDSReporter
from ray_release.reporter.log import LogReporter
from ray_release.result import Result
from ray_release.wheels import find_and_wait_for_ray_wheels_url
@click.command()
@click.argument("test_name", required=True, type=str)
@click.option(
"--test-collection-file",
default=None,
type=str,
help="File containing test configurations",
)
@click.option(
"--smoke-test",
default=False,
type=bool,
is_flag=True,
help="Finish quickly for testing",
)
@click.option(
"--report",
default=False,
type=bool,
is_flag=True,
help="Report results to database",
)
@click.option(
"--ray-wheels",
default=None,
type=str,
help=(
"Commit hash or URL to Ray wheels to be used for testing. "
"If empty, defaults to the BUILDKITE_COMMIT env variable. "
"Can be e.g. `master` to fetch latest wheels from the "
"Ray master branch. Can also be `<repo_url>:<branch>` or "
"`<repo_url>:<commit>` to specify a different repository to "
"fetch wheels from, if available."
),
)
@click.option(
"--cluster-id",
default=None,
type=str,
help="Cluster ID of existing cluster to be re-used.",
)
@click.option(
"--cluster-env-id",
default=None,
type=str,
help="Cluster env ID of existing cluster env to be re-used.",
)
@click.option(
"--no-terminate",
default=False,
type=bool,
is_flag=True,
help="Do not terminate cluster after test.",
)
def main(
test_name: str,
test_collection_file: Optional[str] = None,
smoke_test: bool = False,
report: bool = False,
ray_wheels: Optional[str] = None,
cluster_id: Optional[str] = None,
cluster_env_id: Optional[str] = None,
no_terminate: bool = False,
):
test_collection_file = test_collection_file or os.path.join(
os.path.dirname(__file__), "..", "..", "release_tests.yaml"
)
test_collection = read_and_validate_release_test_collection(test_collection_file)
test = find_test(test_collection, test_name)
if not test:
raise ReleaseTestCLIError(
f"Test `{test_name}` not found in collection file: "
f"{test_collection_file}"
)
if smoke_test:
test = as_smoke_test(test)
ray_wheels_url = find_and_wait_for_ray_wheels_url(
ray_wheels, timeout=DEFAULT_WHEEL_WAIT_TIMEOUT
)
anyscale_project = os.environ.get("ANYSCALE_PROJECT", None)
if not anyscale_project:
raise ReleaseTestCLIError(
"You have to set the ANYSCALE_PROJECT environment variable!"
)
maybe_fetch_api_token()
result = Result()
reporters = [LogReporter()]
if report:
reporters.append(LegacyRDSReporter())
try:
result = run_release_test(
test,
anyscale_project=anyscale_project,
result=result,
ray_wheels_url=ray_wheels_url,
reporters=reporters,
cluster_id=cluster_id,
cluster_env_id=cluster_env_id,
no_terminate=no_terminate,
)
except ReleaseTestError as e:
logger.exception(e)
logger.info(
f"Release test pipeline for test {test['name']} completed. "
f"Returning with exit code = {result.return_code}"
)
sys.exit(result.return_code)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
7d59995c77d2bfd70c4e9e24e6d9add01ba90bfb | 705ca924bc63e8b324b847b21263f823219280e1 | /apps/its_login_register/migrations/0006_job.py | 6e32a697bb0da3c4dffe8fc695f96d8bda5c8dfd | [] | no_license | Komaldhall/Helping-Hand | 46a28f70045029794b0feb502db1ce3c8ba721e3 | a544b3812d3eb968233cfd28464c321f3bc997af | refs/heads/master | 2020-04-16T09:29:15.308558 | 2019-01-13T08:44:26 | 2019-01-13T08:44:26 | 165,465,986 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | # Generated by Django 2.0.7 on 2018-07-20 18:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('its_login_register', '0005_auto_20180720_1103'),
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('desc', models.TextField()),
('location', models.CharField(max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post', to='its_login_register.User')),
],
),
]
| [
"[email protected]"
] | |
782369aa9e5911c9a60e033e2124834fa92cff51 | 87849e7794e223214b3e40896c708d4ea17f2a12 | /tests/test_autogen_computed.py | 1144560dca5c23996719909df7610844ed7d95cb | [
"MIT"
] | permissive | novafacing/alembic | 0b6d9bfa9a66bd4883e863a6ce70a7094c9bb85b | 29ff74c2678ab73f6c5a646477c840f5cdded234 | refs/heads/master | 2021-01-14T15:21:53.344810 | 2020-02-24T06:22:46 | 2020-02-24T06:22:46 | 242,660,622 | 0 | 0 | MIT | 2020-02-24T06:13:49 | 2020-02-24T06:13:49 | null | UTF-8 | Python | false | false | 4,577 | py | import sqlalchemy as sa
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from alembic import testing
from alembic.testing import config
from alembic.testing import eq_
from alembic.testing import exclusions
from alembic.testing import is_
from alembic.testing import is_true
from alembic.testing import TestBase
from ._autogen_fixtures import AutogenFixtureTest
class AutogenerateComputedTest(AutogenFixtureTest, TestBase):
__requires__ = ("computed_columns",)
__backend__ = True
def test_add_computed_column(self):
m1 = MetaData()
m2 = MetaData()
Table("user", m1, Column("id", Integer, primary_key=True))
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("foo", Integer, sa.Computed("5")),
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_column")
eq_(diffs[0][2], "user")
eq_(diffs[0][3].name, "foo")
c = diffs[0][3].computed
is_true(isinstance(c, sa.Computed))
is_(c.persisted, None)
eq_(str(c.sqltext), "5")
def test_remove_computed_column(self):
m1 = MetaData()
m2 = MetaData()
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("foo", Integer, sa.Computed("5")),
)
Table("user", m2, Column("id", Integer, primary_key=True))
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_column")
eq_(diffs[0][2], "user")
c = diffs[0][3]
eq_(c.name, "foo")
is_(c.computed, None)
if config.requirements.computed_reflects_as_server_default.enabled:
is_true(isinstance(c.server_default, sa.DefaultClause))
eq_(str(c.server_default.arg.text), "5")
else:
is_(c.server_default, None)
@testing.combinations(
lambda: (sa.Computed("5"), sa.Computed("5")),
lambda: (sa.Computed("bar*5"), sa.Computed("bar*5")),
lambda: (sa.Computed("bar*5"), sa.Computed("bar * 42")),
lambda: (
sa.Computed("bar*5"),
sa.Computed("bar * 42", persisted=True),
),
lambda: (None, sa.Computed("bar*5")),
(
lambda: (sa.Computed("bar*5"), None),
config.requirements.computed_doesnt_reflect_as_server_default,
),
)
def test_computed_unchanged(self, test_case):
arg_before, arg_after = testing.resolve_lambda(test_case, **locals())
m1 = MetaData()
m2 = MetaData()
arg_before = [] if arg_before is None else [arg_before]
arg_after = [] if arg_after is None else [arg_after]
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("bar", Integer),
Column("foo", Integer, *arg_before),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("bar", Integer),
Column("foo", Integer, *arg_after),
)
diffs = self._fixture(m1, m2)
eq_(len(diffs), 0)
@config.requirements.computed_reflects_as_server_default
def test_remove_computed_default_on_computed(self):
"""Asserts the current behavior which is that on PG and Oracle,
the GENERATED ALWAYS AS is reflected as a server default which we can't
tell is actually "computed", so these come out as a modification to
the server default.
"""
m1 = MetaData()
m2 = MetaData()
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("bar", Integer),
Column("foo", Integer, sa.Computed("bar + 42")),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("bar", Integer),
Column("foo", Integer),
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0][0], "modify_default")
eq_(diffs[0][0][2], "user")
eq_(diffs[0][0][3], "foo")
old = diffs[0][0][-2]
new = diffs[0][0][-1]
is_(new, None)
is_true(isinstance(old, sa.DefaultClause))
if exclusions.against(config, "postgresql"):
eq_(str(old.arg.text), "(bar + 42)")
elif exclusions.against(config, "oracle"):
eq_(str(old.arg.text), '"BAR"+42')
| [
"[email protected]"
] | |
a94cf976e9587529566a28af7ecc54d87fa2a67e | 733b5c3974dd135c390aedbb75ce863abfac0759 | /portal/forms.py | 92d9c7bf4f754d5a879255c286ec998952d941e0 | [] | no_license | stepin-s/electroportal | eb3ade027d548969761a9482aaddbcfb81666d0d | d8228ff77805d405f56d18537fa17dcc945cf8a6 | refs/heads/master | 2022-12-02T12:33:29.163301 | 2020-08-17T07:05:24 | 2020-08-17T07:05:24 | 284,604,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | from django import forms
from .models import News
from .models import Videos
class NewsForm(forms.ModelForm):
class Meta:
model = News
fields = ('title', 'text',)
class VideosForm(forms.ModelForm):
class Meta:
model = Videos
fields = ('title', 'text',) | [
"[email protected]"
] | |
27c70bdc66179c2000f823081a3d97b2140bc3e8 | cf945fb7c961376bfcff37c80fe50312d4f32290 | /Books/NetworkScraping_Py3/C2_NetworkHrefScraping/E2_HrefScarpingEntry.py | 30aa204aa2cc770573683076ad0a29dac704befa | [] | no_license | lizhenQAZ/code_manage | faa1e805326cc8da8463e0f8820c9d092a04dddb | f98977d58a9febb8212652846314418bba37bfc7 | refs/heads/master | 2020-12-03T00:00:52.205238 | 2018-12-19T16:00:48 | 2018-12-19T16:00:48 | 95,968,266 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import re
url = 'https://en.wikipedia.org/wiki/Kevin_Bacon'
rex = re.compile('^(/wiki/)((?!:).)*$')
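# The pattern keeps internal article links such as '/wiki/Kevin_Bacon';
# the '(?!:)' lookahead rejects namespaced pages like '/wiki/Category:Actors'.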
def gettitle(url_info):
try:
html = urlopen(url_info)
except HTTPError as e:
return None
else:
try:
bsobj = BeautifulSoup(html.read())
title = bsobj.find('div', {'id': 'bodyContent'}).find_all('a', {'href': rex})
except AttributeError as e:
return None
else:
return title
# Get the link address in the href attribute of each entry <a> tag and print it
title_list = gettitle(url)
for title_info in title_list:
if 'href' in title_info.attrs:
print(title_info.attrs['href'])
| [
"[email protected]"
] |