blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 3..281) | content_id (stringlengths 40..40) | detected_licenses (sequencelengths 0..57) | license_type (stringclasses, 2 values) | repo_name (stringlengths 6..116) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringclasses, 313 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k..668M, ⌀) | star_events_count (int64, 0..102k) | fork_events_count (int64, 0..38.2k) | gha_license_id (stringclasses, 17 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 107 values) | src_encoding (stringclasses, 20 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4..6.02M) | extension (stringclasses, 78 values) | content (stringlengths 2..6.02M) | authors (sequencelengths 1..1) | author (stringlengths 0..175)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
338664a163b874791c977fd2119b907b6b0c55f1 | 9a7bd39ee05ebf5fd1b173bb5b8b4d2afd3d7be1 | /backend_svc/setup.py | 5f817dc1595d30c24104afa40d70be0bf0a88f37 | [
"MIT"
] | permissive | MartenSies/KontrolPanel | 7972a124a1dbe61fa4d24fba4fd29396ab10bce1 | 53b0a6e4f721c2ddf9655ba258829841e6e389f5 | refs/heads/master | 2022-03-03T03:10:45.564867 | 2019-11-06T19:53:00 | 2019-11-06T19:53:00 | 208,911,552 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,267 | py |
import os
import sys
import subprocess
from distutils.core import Command
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'plaster_pastedeploy',
'pyramid',
'pyramid_debugtoolbar',
'waitress',
'kubernetes',
]
test_requires = [
'WebTest >= 1.3.1',
'pytest >= 3.7.4',
'pytest-cov',
'pylint >= 2.4.3',
'mypy >= 0.740',
]
class Test(Command):
user_options = []
def initialize_options(self):
subprocess.call(['pip', 'install'] + test_requires)
def finalize_options(self):
pass
def run(self):
import pytest
errno = pytest.main(['--cov-report', 'term-missing:skip-covered',
'--cov-report', 'xml',
'--cov', 'backend_svc',
'--cov', 'test',
'test'])
sys.exit(errno)
class Linting(Command):
user_options = []
def initialize_options(self):
subprocess.call(['pip', 'install'] + test_requires)
def finalize_options(self):
pass
def run(self):
subprocess.call(['pylint',
'--rcfile', 'pylintrc',
'--output-format', 'parseable',
'backend_svc'])
class TypeLinting(Command):
user_options = []
def initialize_options(self):
subprocess.call(['pip', 'install'] + test_requires)
def finalize_options(self):
pass
def run(self):
subprocess.call(['mypy',
'--ignore-missing-imports',
'--follow-imports=skip',
'backend_svc'])
setup(
name='backend_svc',
version='0.0',
description='backend-svc',
long_description=README + '\n\n' + CHANGES,
cmdclass={'test': Test, 'lint': Linting, 'type': TypeLinting},
packages=find_packages(),
tests_require=test_requires,
install_requires=requires,
entry_points={
'paste.app_factory': [
'main = backend_svc:main',
],
},
)
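# Usage sketch (assumed from the cmdclass mapping above, not part of the original file):
#   python setup.py test   # installs the test deps, then runs pytest with coverage
#   python setup.py lint   # runs pylint with the project's pylintrc
#   python setup.py type   # runs mypy over backend_svc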
| [
"[email protected]"
] | |
4fd7a00ec6687b91f5c1c497803a8b74d7c10c9e | f163ff3b942fb91ac797e5fea3da82e394bc287f | /Numpy/remfits2/daylcurves.py | 4997f44d9f7f7680ccd611410e9b82d3bf578102 | [] | no_license | JohnMCollins/python-astro-progs | 3980c59d3e9ac0184647de728d4cd51e32bde35b | 5c4da3e9ebf6e2c5d2e392102e009bc3ad175403 | refs/heads/master | 2023-04-09T06:54:21.487801 | 2023-03-16T18:59:08 | 2023-03-16T18:59:08 | 58,373,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,214 | py | #! /usr/bin/env python3
"""Make light curve from findresults for one object"""
import argparse
import sys
import re
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
import numpy as np
import remdefaults
import find_results
import objdata
import remgeom
import miscutils
class Result:
"""Recuord results"""
def __init__(self, when, filtname, findresult, obsind, adubyref=None):
self.when = when
self.filtname = filtname
self.findres = findresult
if adubyref is None:
self.refset = self.adubyref = None
else:
self.refset = set(adubyref)
self.adubyref = adubyref
self.reladus = 0.0
self.obsind = obsind
Valuelist = np.array([]).reshape(0, 2)
Points_list = []
Annotation = None
XBase = YBase = XScale = YScale = None
def setup_hover(plotres, obs):
"""Set up the parameters needed to do hover over chart"""
global Valuelist, Points_list
Valuelist = np.concatenate((Valuelist, plotres[0].get_xydata()))
Points_list += obs
def complete_hover(figur):
"""Complete setup of hover"""
global Annotation, XBase, YBase, XScale, YScale, Valuelist
canv = figur.canvas
axs = figur.axes[0]
Annotation = axs.annotate("", xy=(0, 0), xytext=(20, 20),
xycoords='figure pixels',
textcoords="offset points",
bbox=dict(boxstyle="round",
fc=popupcolour),
arrowprops=dict(arrowstyle="->"))
Annotation.get_bbox_patch().set_alpha(alphaflag)
Annotation.set_visible(False)
canv.mpl_connect('motion_notify_event', hover)
XBase, XScale = axs.get_xlim()
YBase, YScale = axs.get_ylim()
XScale -= XBase
YScale -= YBase
Valuelist -= (XBase, YBase)
Valuelist /= (XScale, YScale)
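# Store each normalised (x, y) point as a complex number so that
# find_nearest_result can compute Euclidean distances with a single
# vectorised np.abs call instead of a Python loop.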
Valuelist = np.array([complex(r, i) for r, i in Valuelist])
def find_nearest_result(event):
"""Get result nearest to event"""
axs = event.inaxes
if axs is None:
return None
distances = np.abs(Valuelist - complex((event.xdata - XBase) / XScale, (event.ydata - YBase) / YScale))
min_dist_arg = np.argmin(distances)
if distances[min_dist_arg] <= flagdist:
return Points_list[min_dist_arg]
return None
def hover(event):
"""Callback for mouse hover"""
global Annotation
vis = Annotation.get_visible()
res = find_nearest_result(event)
if res is None:
if vis:
Annotation.set_visible(False)
event.canvas.draw_idle()
return
Annotation.set_text("{:%d/%m/%Y %H:%M:%S} obsid {:d}".format(res.when, res.obsind))
Annotation.xy = (event.x, event.y)
Annotation.set_visible(True)
event.canvas.draw_idle()
resultlist = []
matchname = re.compile(r'(\w+?)(\d+)$')
Filt_colour = dict(g='g', r='r', i='k', z='b')
Lstyles = ('solid', 'dotted', 'dashed', 'dashdot')
Names = dict(GJ551='Proxima Centauri', GJ699="Barnard's Star", GJ729='Ross 154')
nresults = dict()
for filtp in Filt_colour:
nresults[filtp] = 0
rg = remgeom.load()
parsearg = argparse.ArgumentParser(description='Get light curve over day', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parsearg.add_argument('files', nargs='+', type=str, help='Find results files')
parsearg.add_argument('--object', type=str, required=True, help='Object')
remdefaults.parseargs(parsearg, tempdir=False)
# parsearg.add_argument('--marker', type=str, default=',', help='Marker style for scatter plot')
parsearg.add_argument('--userefobj', action='store_true', help='Use reference objects')
parsearg.add_argument('--popupcolour', type=str, default='g', help='Popup colour')
parsearg.add_argument('--alphaflag', type=float, default=0.4, help='Alpha for flag and popup')
parsearg.add_argument('--flagdist', type=float, default=10.0, help='Percentage of range for flag dist')
parsearg.add_argument('--filter', type=str, help='Restrict display to just given filter')
parsearg.add_argument('--xlabel', type=str, help='X axis label')
parsearg.add_argument('--ylabel', type=str, help='Y axis label')
parsearg.add_argument('--daterot', type=float, default=45, help='Rotation of dates')
parsearg.add_argument('--verbose', action='store_true', help='Give blow-by-blow account')
parsearg.add_argument('--bytime', action='store_true', help='Display by time')
parsearg.add_argument('--ylower', type=float, help='Lower limit of Y axis')
parsearg.add_argument('--yupper', type=float, help='Upper limit of Y axis')
parsearg.add_argument('--yscale', type=float, help='Scale for Y axis')
rg.disp_argparse(parsearg)
resargs = vars(parsearg.parse_args())
flist = resargs['files']
remdefaults.getargs(resargs)
targobj = resargs['object']
# marker = resargs['marker']
userefs = resargs['userefobj']
popupcolour = resargs['popupcolour']
alphaflag = resargs['alphaflag']
flagdist = resargs['flagdist'] / 100.0
filt = resargs['filter']
if filt:
filt = set(filt)
verbose = resargs['verbose']
ylab = resargs['ylabel']
xlab = resargs['xlabel']
daterot = resargs['daterot']
bytime = resargs['bytime']
ylower = resargs['ylower']
yupper = resargs['yupper']
yscale = resargs['yscale']
if xlab is None:
if bytime:
xlab = "Time observation taken"
else:
xlab = "Time & date of observation"
if userefs:
if yscale is None:
yscale = 1.0
if ylab is None:
if yscale != 1.0:
ylab = "Relative ADU count (scale x {:.6g})".format(yscale)
else:
ylab = "Relative ADU count"
else:
if yscale is None:
yscale = 1e3
if ylab is None:
if yscale != 1.0:
ylab = "ADU count (scale x {:.6g})".format(yscale)
else:
ylab = "ADU count"
filt_colour_lists = dict(g=[], i=[], r=[], z=[])
filt_colour_counts = dict(g=0, i=0, r=0, z=0)
if filt is not None and len(filt) == 1:
for ls in Lstyles:
for col in Filt_colour.values():
fc = (ls, col)
for filtp in 'girz':
filt_colour_lists[filtp].append(fc)
else:
for filtp, col in Filt_colour.items():
for ls in Lstyles:
filt_colour_lists[filtp].append((ls, col))
ofig = rg.disp_getargs(resargs)
mydb, mycurs = remdefaults.opendb()
try:
targobj = objdata.get_objname(mycurs, targobj)
except objdata.ObjDataError as e:
print("Trouble with", targobj, e.args[0], file=sys.stderr)
sys.exit(10)
for fil in flist:
try:
findres = find_results.load_results_from_file(fil)
except find_results.FindResultErr as e:
print(fil, "gave error", e.args[0], file=sys.stderr)
continue
if findres.num_results(idonly=True, nohidden=True) == 0:
print(fil, "has no results", file=sys.stderr)
continue
if filt and findres.filter not in filt:
if verbose:
print(fil, "is for filter", findres.filter, "skipping", file=sys.stderr)
continue
try:
targfr = findres[targobj]
except find_results.FindResultErr:
if verbose:
print(targobj, "not found in", fil, "skipping", file=sys.stderr)
continue
if targfr.hide:
if verbose:
print(targobj, "is in", fil, "but is hidden", file=sys.stderr)
continue
if userefs:
refobjadus = dict()
for fr in findres.results(idonly=True, nohidden=True):
name = fr.obj.objname
if name != targobj:
refobjadus[name] = fr.adus
resultlist.append(Result(findres.obsdate, findres.filter, targfr, findres.obsind, refobjadus))
else:
resultlist.append(Result(findres.obsdate, findres.filter, targfr, findres.obsind))
nresults[findres.filter] += 1
if len(resultlist) < 2:
print("Insufficient results", file=sys.stderr)
sys.exit(20)
if filt and verbose:
for filtp in filt:
if nresults[filtp] < 2:
print("Insufficient results for filter", filtp, file=sys.stderr)
if userefs:
# Set up common subset array
# Grab the first result for each filter to kick things off
filter_subset = dict()
for r in resultlist:
if r.filtname not in filter_subset:
filter_subset[r.filtname] = r.refset
# Find common subset for each filter as the intersection of what we had before with the new set
for r in resultlist:
filter_subset[r.filtname] &= r.refset
for filtp, fsub in filter_subset.items():
nsub = len(fsub)
if nsub == 0:
if verbose:
print("No common subset for filter", filtp, file=sys.stderr)
nresults[filtp] = 0
continue
if verbose and nsub < 5:
print("Warning only", nsub, "in subset for filter", filtp, file=sys.stderr)
for resp in resultlist:
if resp.filtname == filtp:
resp.reladus = resp.findres.adus / np.sum([resp.adubyref[n] for n in fsub])
fig = rg.plt_figure()
ax = plt.subplot(111)
if ylower is not None:
ax.set_ylim(bottom=ylower / yscale)
if yupper is not None:
ax.set_ylim(top=yupper / yscale)
y_formatter = mtick.ScalarFormatter(useOffset=False)
ax.yaxis.set_major_formatter(y_formatter)
if bytime:
df = mdates.DateFormatter("%H:%M")
else:
df = mdates.DateFormatter("%d/%m/%y %H:%M")
ax.xaxis.set_major_formatter(df)
if daterot != 0.0:
plt.xticks(rotation=daterot)
plt.xlabel(xlab)
plt.ylabel(ylab.format(yscale=yscale))
try:
targobjname = Names[targobj]
except KeyError:
targobjname = targobj
leglist = []
if bytime:
for filtp, nres in nresults.items():
if nres <= 0:
continue
results_for_filter = sorted([res for res in resultlist if res.filtname == filtp], key=lambda x: x.when)
lastdate = datetime.date(1901, 1, 1)
timelist = []
lscount = 0
while len(results_for_filter) != 0:
nxtr = results_for_filter.pop(0)
nxtdt = nxtr.when
nxtd = nxtdt.date()
if nxtd != lastdate:
if len(timelist) > 2:
adulist = np.array(adulist) / yscale
mnadu = adulist.mean()
stadu = adulist.std()
if userefs:
leglist.append(r"Filter {:s} {:%d/%m/%y} ${:.3g} \pm {:.2g}$ (ss {:d})".format(filtp, lastdate, mnadu, stadu, len(filter_subset[filtp])))
else:
leglist.append(r"Filter {:s} {:%d/%m/%y} ${:.3g} \pm {:.2g}$".format(filtp, lastdate, mnadu, stadu))
ls, col = filt_colour_lists[filtp][filt_colour_counts[filtp] % len(filt_colour_lists[filtp])]
pstr = plt.errorbar(timelist, adulist, stadu, color=col, linestyle=ls)
filt_colour_counts[filtp] += 1
setup_hover(pstr, obsinds)
timelist = []
adulist = []
obsinds = []
lastdate = nxtd
timelist.append(datetime.datetime(2020, 1, 1, nxtdt.hour, nxtdt.minute, nxtdt.second))
if userefs:
adulist.append(nxtr.reladus)
else:
adulist.append(nxtr.findres.adus)
obsinds.append(nxtr)
# Do trailing ones
if len(timelist) > 2:
adulist = np.array(adulist) / yscale
mnadu = adulist.mean()
stadu = adulist.std()
if userefs:
leglist.append(r"Filter {:s} {:%d/%m/%y} ${:.3g} \pm {:.2g}$ (ss {:d})".format(filtp, lastdate, mnadu, stadu, len(filter_subset[filtp])))
else:
leglist.append(r"Filter {:s} {:%d/%m/%y} ${:.3g} \pm {:.2g}$".format(filtp, lastdate, mnadu, stadu))
ls, col = filt_colour_lists[filtp][filt_colour_counts[filtp] % len(filt_colour_lists[filtp])]
pstr = plt.errorbar(timelist, adulist, stadu, color=col, linestyle=ls)
setup_hover(pstr, obsinds)
else:
for filtp, nres in nresults.items():
if nres <= 0:
continue
datelist = []
adulist = []
obsinds = []
for rl in sorted([res for res in resultlist if res.filtname == filtp], key=lambda x: x.when):
datelist.append(rl.when)
if userefs:
adulist.append(rl.reladus)
else:
adulist.append(rl.findres.adus)
obsinds.append(rl)
if len(datelist) < 2:
continue
adulist = np.array(adulist) / yscale
mnadu = adulist.mean()
stadu = adulist.std()
ls, col = filt_colour_lists[filtp][filt_colour_counts[filtp] % len(filt_colour_lists[filtp])]
pstr = plt.errorbar(datelist, adulist, stadu, color=col, linestyle=ls)
filt_colour_counts[filtp] += 1
setup_hover(pstr, obsinds)
if userefs:
leglist.append(r"Filter {:s} ${:.3g} \pm {:.2g}$ ({:d} subset)".format(filtp, mnadu, stadu, len(filter_subset[filtp])))
else:
leglist.append(r"Filter {:s} ${:.3g} \pm {:.2g}$".format(filtp, mnadu, stadu))
plt.legend(leglist)
plt.tight_layout()
if ofig is None:
complete_hover(fig)
plt.show()
else:
ofig = miscutils.replacesuffix(ofig, 'png')
fig.savefig(ofig)
| [
"[email protected]"
] | |
fbf1b1c4748f7ba833bd84c2a17ae2c06c738890 | db729f16d023fbacf61c05a437847287c9365472 | /research_notebook/hwan_opencv/new_opencv/np_test.py | 521550dbd2cf6dbe2adb07a781356d7bd4d90d2d | [] | no_license | co24428/Coffee_Inspection_Work | 1b7d9bfd4d2aa29480acca339c0502eb49a2ae3a | 0823c0b1b276d341a65386c05e9174520fc84b21 | refs/heads/master | 2023-08-01T10:41:16.696500 | 2020-06-16T02:03:57 | 2020-06-16T02:03:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | a = [
[12, 18, 6, 3],
[ 4, 3, 1, 2],
[15, 8, 9, 6]
]
a.sort(key=lambda x: x[1])
print(a)
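# Expected output (rows ordered by their second element, 3 < 8 < 18):
# [[4, 3, 1, 2], [15, 8, 9, 6], [12, 18, 6, 3]]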
| [
"[email protected]"
] | |
53a392751a75f85027707c09f1f615efa1879fc4 | 1705e97ef5613685e142e3f78a2057399b09858c | /Code/asiportal/asiapp/wsgi.py | d3acec3485332c8d95214dd8fcb36efc399cc96e | [] | no_license | FIU-SCIS-Senior-Projects/Academic-Success-Initiative---ASI-PantherCentric-1.0 | 0b956175efb031022ed32412195531c7f0c162c5 | 8ee64b58e2634384d5905defd3701a453b49b966 | refs/heads/master | 2022-11-24T00:07:52.458186 | 2017-08-02T01:36:32 | 2017-08-02T01:36:32 | 91,715,982 | 0 | 0 | null | 2022-11-22T01:31:04 | 2017-05-18T16:37:10 | SQLPL | UTF-8 | Python | false | false | 389 | py | """
WSGI config for asiapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asiapp.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
805c56c2ef847fa55c68c70aa320b8e855253e40 | ab011130b25a0e83c1c231627f796f28241b1648 | /database/env.py | 974e6c469d8489b3090acceef563653319e172c1 | [] | no_license | Srikanth-SM/todo-gRPC | 116f5d74305afb1239ef142fa08388950e91481f | 721c98974e6192ba6a5ef6a0dd5a19a176411cc4 | refs/heads/master | 2020-04-16T22:47:43.130533 | 2019-01-18T09:56:31 | 2019-01-18T09:56:31 | 164,848,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from database.models import Base
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
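# Usage sketch (standard Alembic CLI, assuming this project's alembic.ini):
#   alembic upgrade head        # online mode: applies migrations over a live connection
#   alembic upgrade head --sql  # offline mode: emits the migration SQL to stdout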
| [
"[email protected]"
] | |
901f068a066a5fe9dc359cf291f0730aebf4c144 | c792518d737f8a3542c3ac9141f6fcf33c3479bd | /08_Joshua_Pyhton_Lab10.py | 40f9814362582c9aca503251c34790de5282b320 | [] | no_license | Josheaaa/08_Joshua_Python_Lab10 | e73b3f94bd546939637b134cb3572c4402c2f1b4 | d84256073c33ff2c20b8488e81b7cc982e050ea0 | refs/heads/master | 2023-07-02T09:22:44.005777 | 2021-08-06T03:07:27 | 2021-08-06T03:07:27 | 393,232,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,067 | py | import pandas as pd
# Create two pandas Series from lists
ps1 = pd.Series([1, 3, 5, 7, 9])
ps2 = pd.Series([2, 4, 6, 8, 10])
# Add, subtract, multiply and divide one series by the other element-wise
ps = ps1 + ps2
print(ps)
ps = ps2 - ps1
print(ps)
ps = ps2 * ps1
print(ps)
ps = ps2 / ps1
print(ps)
# Compare the elements in the above series (compare will output Boolean result)
print(ps2 == ps1)
print(ps2 > ps1)
print(ps2 < ps1)
# Create a pandas Series from a dictionary:
data = {'a': 10, 'b': 20, 'c': 30, 'd': 40, 'e': 50}
psd = pd.Series(data)
print(psd)
# Create a NumPy array first before converting it to a pandas Series:
import numpy as np
np_array = np.array([10, 20, 30, 40, 50])
print(np_array)
ps_numArray = pd.Series(np_array)
print(ps_numArray)
# Convert a pandas Series object to numeric:
s1 = pd.Series([10, '20', 'python', '48', '50'])
print(s1)
s2 = pd.to_numeric(s1, errors='coerce')
print(s2)
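# Note: with errors='coerce', non-numeric entries such as 'python' become NaN.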
# Add new data to the s2 series:
s2 = pd.concat([s2, pd.Series([60, 70])]) # Series.append was removed in pandas 2.0
print(s2)
# Sort the values of the s2 series into sorted_s2:
sorted_s2 = s2.sort_values()
print(sorted_s2)
# Re-index sorted_s2 series:
sorted_s2.index = [1, 2, 3, 4, 5, 6, 7]
print(sorted_s2)
# Calculate the mean, median and standard deviation of the sorted series:
print(sorted_s2.mean())
print(sorted_s2.median())
print(sorted_s2.std())
# Convert the above series to a list:
var1 = s2.values.tolist()
print(var1)
print(type(var1))
# Convert the above list to numpy array:
npArray = np.array(var1)
print(npArray)
print(type(npArray))
# Combine a series of lists into one pandas Series and keep only the distinct colors:
import pandas as pd
colorList = pd.Series([
['Red', 'Blue', 'Yellow'],
['Red', 'White'],
['Black']])
print(colorList)
s = colorList
s = s.apply(pd.Series).stack().drop_duplicates().reset_index(drop=True)
print(s)
# Create a dataframe using a single list or a list of lists:
values = [1, 2, 3, 4, 5]
df = pd.DataFrame(values)
print(df)
listOflist = [['Mike', 5], ['Peter', 10], ['Thomas', 15]]
df = pd.DataFrame(listOflist,columns=['Name', 'Age'])
print(df)
#Create a dataframe using a dictionary data structure:
dict = {"country": ["Brazil", "Russia", "India", "China", "South Africa"],
"capital": ["Brasilia", "Moscow", "New Dehli", "Beijing", "Bloemfontein"],
"area": [8.516, 17.10, 3.286, 9.597, 1.221],
"population": [200.4, 143.5, 1252, 1357, 52.98] }
brics = pd.DataFrame(dict)
print(brics)
# Change or set the index for brics:
brics.index = ["BR", "RU", "IN", "CH", "SA"]
# Print out brics with new index values
print(brics)
# Using DataFrame to read a CSV file:
# Import the mtcars.csv data: cars
cars = pd.read_csv('mtcars.csv')
#print out cars
print(cars)
# Using head() to read the first 5 rows and tail() to read the last 5 rows:
print(cars.head())
print(cars.tail())
# Use columns to read only the column headers:
print(cars.columns)
# Display the original index:
print(cars.index) # Print original indexes
# Create another dataframe by splitting the Car Brand and Model
car = cars['model'].str.split(' ', n = 1, expand=True)
print(car)
# Assign the new car brand and models back to the original dataframe.
cars = cars.assign(car_brand=car[0]) # assign a new column named car_brand
print(cars)
cars = cars.assign(car_model=car[1]) # assign a new column named car_model
print(cars)
mazdas = cars[cars['car_brand'] == 'Mazda'] # select the rows whose car_brand is Mazda
print(mazdas)
# Change the index to Car mode
cars.index = cars['model'] # Set indexes to car name
print(cars)
del cars['model'] # Delete the model column
print(cars) #print new indexes
print(cars.iloc[:,:6].describe()) # Summarize the first 6 columns
# Display the Car new info for 10 records:
print(cars.head(10))
# Find the mean for the dataframe columns:
print(cars.mean(numeric_only=True))
# Using matplotlib to plot a graph relationship between mile per gallon (mpg) to horse power(hp):
import matplotlib.pyplot as plt
#plt.scatter (cars['mpg'], cars['hp'])
#plt.show(); #or plt.savefig("name.png")
# Using matplotlib to plot a bar chart to shows the horse power of each car model.
#fig = plt.figure
#ax = cars[['car_model', 'hp']].plot(kind='bar', title="Horse Power comparison")
#plt.show()
# Plot a histogram to show the category of Horse Power in most cars.
fig = plt.figure()
ax = cars['hp'].plot(kind='hist', title="Range in HP", figsize=(10, 10), legend=True, fontsize=12)
plt.show()
# Saving the histogram diagram in a png format in the current directory of program executed.
my_fig = ax.get_figure() # Get the figure
my_fig.savefig("Car_Range_HP.png") #Save to file
# Write the following code to find the most fuel-efficient car in the data file and present the result in bar chart in ascending order.
ps = cars['mpg'].sort_values()
index = np.arange(len(ps.index))
plt.xlabel('Models', fontsize=10)
plt.ylabel('Miles per gallon', fontsize=10)
plt.xticks(index, ps.index, fontsize=10, rotation=90)
plt.title('Miles per gallon of Cars')
plt.bar(ps.index, ps.values)
plt.show(); #or plt.savefig("name.png")
| [
"[email protected]"
] | |
db6aeb882a3c913eaa74cd0748b30eb5a94b2128 | b21c5a443e4e30308f3a62cbd9043285e8f61623 | /Algorithms/MedianOfTwoSortedArrays.py | ec8ec42922e48fd27d7964ba0820d283cad62384 | [] | no_license | RobertoGuzmanJr/PythonToolsAndExercises | eec9053214ddcf43b661f4ce427d6eda5bf4b840 | 8ec61b002204ecc4b87abe09f8dffcfe3aaa07e6 | refs/heads/master | 2022-09-24T01:15:11.074889 | 2020-06-08T20:55:09 | 2020-06-08T20:55:09 | 257,700,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | from typing import List # needed for the List[int] annotations below
from statistics import mean # needed for mean(mids) at the end
class Solution:
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
m = len(nums1)
n = len(nums2)
p1 = p2 = 0
mids = []
s = 0
#even case
if (m + n) % 2 == 0:
while s <= (m+n)/2:
val = 0
if p1 == m:
val = nums2[p2]
p2 += 1
elif p2 == n:
val = nums1[p1]
p1 += 1
elif nums1[p1] <= nums2[p2]:
val = nums1[p1]
p1 += 1
else:
val = nums2[p2]
p2 += 1
if s in [((m+n)/2),((m+n)/2)-1]:
mids.append(val)
s += 1
#odd case
else:
while s <= (m+n)/2:
val = 0
if p1 == m:
val = nums2[p2]
p2 += 1
elif p2 == n:
val = nums1[p1]
p1 += 1
elif nums1[p1] <= nums2[p2]:
val = nums1[p1]
p1 += 1
else:
val = nums2[p2]
p2 += 1
if s in [(m+n-1)/2]:
mids.append(val)
s += 1
return mean(mids)
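# Quick check (hypothetical driver; the judge harness normally calls this):
#   Solution().findMedianSortedArrays([1, 3], [2])     -> 2.0
#   Solution().findMedianSortedArrays([1, 2], [3, 4])  -> 2.5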
| [
"[email protected]"
] | |
9d7b85960b3a7ec53691dfeae2049675745204e7 | f975d71c902c4123c2e2a1e51f72b17571e925ef | /tests/unit/test_db.py | badc9da37fd2ca5186dd611b9ba67e751e91ec58 | [] | no_license | qarthandgi/energy-bill | abc4e88ef266fefb5ff60bf0e6a27d26af3c0d15 | 19adc1b7f009c4ad3a328f652e6c7a7aff4a26ea | refs/heads/master | 2021-06-23T21:42:11.474738 | 2019-10-28T22:19:56 | 2019-10-28T22:19:56 | 218,155,303 | 0 | 0 | null | 2021-04-20T19:01:02 | 2019-10-28T22:16:13 | Python | UTF-8 | Python | false | false | 64 | py | from src import db
def test_db():
db.test()
assert True | [
"[email protected]"
] | |
165151141ee57f68ac0c0c19274ab313a9f6137a | 5ce59b426bbb13ade60aedba09e097672b100c56 | /practice_setup.py | b8e097ea03ea79e3b6555b887038bb1554adfa3d | [] | no_license | connoralbrecht/AI-Final-Project | b9dced5b1fad2fa3ce0ff3dd4e900b6aab0d7747 | 0e5875b501d2ed86bbb60e723dcfdfc573b42908 | refs/heads/master | 2020-04-09T06:27:39.551499 | 2018-12-06T22:50:20 | 2018-12-06T22:50:20 | 160,113,461 | 0 | 0 | null | 2018-12-05T13:59:41 | 2018-12-03T01:07:26 | Python | UTF-8 | Python | false | false | 15,270 | py | # Created by Minbiao Han and Roman Sharykin
# AI fall 2018
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
import MalmoPython
import json
import logging
import math
import os
import random
import sys
import time
import re
import uuid
from collections import namedtuple
from random import choice
import numpy as np
import practice
EntityInfo = namedtuple('EntityInfo', 'x, y, z, name')
# Create one agent host for parsing:
agent_hosts = [MalmoPython.AgentHost()]
# Parse the command-line options:
agent_hosts[0].addOptionalFlag( "debug,d", "Display debug information.")
agent_hosts[0].addOptionalIntArgument("agents,a", "Number of agents to use, including observer.", 2)
agent_hosts[0].addOptionalStringArgument("map,m", "Name of map to be used", "practice")
agent_hosts[0].addOptionalIntArgument("port,p", "The port to start on", 10000)
agent_hosts[0].addOptionalFloatArgument("noise,n", "Enemy chance to randomly move", 0.3)
try:
agent_hosts[0].parse( sys.argv )
except RuntimeError as e:
print('ERROR:',e)
print(agent_hosts[0].getUsage())
exit(1)
if agent_hosts[0].receivedArgument("help"):
print(agent_hosts[0].getUsage())
exit(0)
DEBUG = agent_hosts[0].receivedArgument("debug")
INTEGRATION_TEST_MODE = agent_hosts[0].receivedArgument("test")
agents_requested = agent_hosts[0].getIntArgument("agents")
NUM_AGENTS = max(1, agents_requested) # Will be NUM_AGENTS robots running around (no separate observer is created below).
map_requested = agent_hosts[0].getStringArgument("map")
PORT = agent_hosts[0].getIntArgument("port")
NOISE = agent_hosts[0].getFloatArgument("noise")
# Create the rest of the agent hosts - one for each remaining robot:
agent_hosts += [MalmoPython.AgentHost() for x in range(1, NUM_AGENTS)]
# Set up debug output:
for ah in agent_hosts:
ah.setDebugOutput(DEBUG) # Turn client-pool connection messages on/off.
if sys.version_info[0] == 2:
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
else:
import functools
print = functools.partial(print, flush=True)
def safeStartMission(agent_host, my_mission, my_client_pool, my_mission_record, role, expId):
used_attempts = 0
max_attempts = 5
print("Calling startMission for role", role)
while True:
try:
# Attempt start:
agent_host.startMission(my_mission, my_client_pool, my_mission_record, role, expId)
break
except MalmoPython.MissionException as e:
errorCode = e.details.errorCode
if errorCode == MalmoPython.MissionErrorCode.MISSION_SERVER_WARMING_UP:
print("Server not quite ready yet - waiting...")
time.sleep(2)
elif errorCode == MalmoPython.MissionErrorCode.MISSION_INSUFFICIENT_CLIENTS_AVAILABLE:
print("Not enough available Minecraft instances running.")
used_attempts += 1
if used_attempts < max_attempts:
print("Will wait in case they are starting up.", max_attempts - used_attempts, "attempts left.")
time.sleep(2)
elif errorCode == MalmoPython.MissionErrorCode.MISSION_SERVER_NOT_FOUND:
print("Server not found - has the mission with role 0 been started yet?")
used_attempts += 1
if used_attempts < max_attempts:
print("Will wait and retry.", max_attempts - used_attempts, "attempts left.")
time.sleep(2)
else:
print("Other error:", e.message)
print("Waiting will not help here - bailing immediately.")
exit(1)
if used_attempts == max_attempts:
print("All chances used up - bailing now.")
exit(1)
print("startMission called okay.")
def safeWaitForStart(agent_hosts):
print("Waiting for the mission to start", end=' ')
start_flags = [False for a in agent_hosts]
start_time = time.time()
time_out = 120 # Allow a two minute timeout.
while not all(start_flags) and time.time() - start_time < time_out:
states = [a.peekWorldState() for a in agent_hosts]
start_flags = [w.has_mission_begun for w in states]
errors = [e for w in states for e in w.errors]
if len(errors) > 0:
print("Errors waiting for mission start:")
for e in errors:
print(e.text)
print("Bailing now.")
exit(1)
time.sleep(0.1)
print(".", end=' ')
if time.time() - start_time >= time_out:
print("Timed out while waiting for mission to start - bailing.")
exit(1)
print()
print("Mission has started.")
def getLayout(name):
matrix = tryToLoad("layouts/" + name)
return matrix
def tryToLoad(fullname):
if (not os.path.exists(fullname)): return None
f = open(fullname)
Matrix = [line.strip() for line in f]
f.close()
return Matrix
level_mat = getLayout(map_requested + ".lay")
def drawItems(x, z):
return '<DrawItem x="' + str(x) + '" y="56" z="' + str(z) + '" type="apple"/>'
def GenBlock(x, y, z, blocktype):
return '<DrawBlock x="' + str(x) + '" y="' + str(y) + '" z="' + str(z) + '" type="' + blocktype + '"/>'
def GenPlayerStart(x, z):
return '<Placement x="' + str(x + 0.5) + '" y="56" z="' + str(z + 0.5) + '" yaw="0"/>'
def GenEnemyStart(x, z):
return '<Placement x="' + str(x + 0.5) + '" y="56" z="' + str(z + 0.5) + '" yaw="0"/>'
pStart = {'x': 0, 'z': 0}
eStart = {'x': 0, 'z': 0}
pCurr = {'x': 0, 'z': 0}
eCurr = {'x': 0, 'z': 0}
possible_dests = []
def mazeCreator():
genstring = ""
genstring += GenBlock(0, 65, 0, "glass") + "\n"
for i in range(len(level_mat)):
for j in range(len(level_mat[0])):
if level_mat[i][j] == "%":
genstring += GenBlock(i, 54, j, "diamond_block") + "\n"
genstring += GenBlock(i, 55, j, "diamond_block") + "\n"
genstring += GenBlock(i, 56, j, "diamond_block") + "\n"
elif level_mat[i][j] == "P":
pStart['x'] = i
pStart['z'] = j
pCurr['x'] = i
pCurr['z'] = j
elif level_mat[i][j] == ".":
genstring += GenBlock(i, 55, j, "glowstone") + "\n"
possible_dests.append((i, j))
elif level_mat[i][j] == "G":
eStart['x'] = i
eStart['z'] = j
eCurr['x'] = i
eCurr['z'] = j
return genstring
def invMake():
xml = ""
for i in range(0, 39):
xml += '<InventoryObject type="diamond_axe" slot="' + str(i) + '" quantity="1"/>'
return(xml)
def getXML(reset):
# Set up the Mission XML:
xml = '''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Hello world!</Summary>
</About>
<ServerSection>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,44*49,73,35:1,159:4,95:13,35:13,159:11,95:10,159:14,159:6,35:6,95:6;12;"/>
<DrawingDecorator>
''' + mazeCreator() + '''
</DrawingDecorator>
<ServerQuitFromTimeUp timeLimitMs="100000"/>
<ServerQuitWhenAnyAgentFinishes/>
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>Player</Name>
<AgentStart> ''' + GenPlayerStart(pStart['x'], pStart['z']) + ''' </AgentStart>
<AgentHandlers>
<DiscreteMovementCommands/>
<ObservationFromFullStats/>
<ObservationFromGrid>
<Grid name="floor3x3W">
<min x="-1" y="0" z="-1"/>
<max x="1" y="0" z="1"/>
</Grid>
<Grid name="floor3x3F">
<min x="-1" y="-1" z="-1"/>
<max x="1" y="-1" z="1"/>
</Grid>
</ObservationFromGrid>
</AgentHandlers>
</AgentSection>
<AgentSection mode="Survival">
<Name>Enemy</Name>
<AgentStart>
''' + GenEnemyStart(eStart['x'], eStart['z']) + '''
<Inventory>''' + invMake() + '''</Inventory>
</AgentStart>
<AgentHandlers>
<DiscreteMovementCommands/>
<ObservationFromFullStats/>
<ObservationFromGrid>
<Grid name="floor3x3W">
<min x="-1" y="0" z="-1"/>
<max x="1" y="0" z="1"/>
</Grid>
<Grid name="floor3x3F">
<min x="-1" y="-1" z="-1"/>
<max x="1" y="-1" z="1"/>
</Grid>
</ObservationFromGrid>
</AgentHandlers>
</AgentSection>
</Mission>'''
return xml
client_pool = MalmoPython.ClientPool()
for x in range(PORT, PORT + NUM_AGENTS + 1):
client_pool.add( MalmoPython.ClientInfo('127.0.0.1', x) )
print("Running mission")
# Create mission xml - use forcereset if this is the first mission.
my_mission = MalmoPython.MissionSpec(getXML("true"), True)
experimentID = str(uuid.uuid4())
for i in range(len(agent_hosts)):
safeStartMission(agent_hosts[i], my_mission, client_pool, MalmoPython.MissionRecordSpec(), i, experimentID)
safeWaitForStart(agent_hosts)
time.sleep(1)
running = True
# Everything prior to here is mostly boring setup stuff. After this is the more interesting bits
current_pos = [(0,0) for x in range(NUM_AGENTS)]
# When an agent is killed, it stops getting observations etc. Track this, so we know when to bail.
timed_out = False
g_score = 0
selected_dest = (0, 0)
dest_reached = False
dest_scores = [0 for x in possible_dests]
#dest_probs = [1/len(possible_dests) for x in possible_dests]
selected_dest = choice(possible_dests)
# This while loop runs one full episode of the "game"
while not timed_out and not dest_reached:
print('global score:', g_score)
print("--------- START OF TURN -------------")
for i in range(NUM_AGENTS):
ah = agent_hosts[i]
world_state = ah.getWorldState()
if world_state.is_mission_running == False:
timed_out = True
if world_state.is_mission_running and world_state.number_of_observations_since_last_state > 0:
msg = world_state.observations[-1].text
ob = json.loads(msg)
#print(current_pos[i])
# Handles enemy movement
if ob['Name'] == 'Enemy':
if "XPos" in ob and "ZPos" in ob:
current_pos[i] = (ob[u'XPos'], ob[u'ZPos'])
#print("Enemy initial pos ", current_pos[i])
print('enemy moving:')
practice.enemyMoveDest(ah, current_pos[i], world_state, selected_dest, NOISE)
ah = agent_hosts[i]
world_state = ah.getWorldState()
if world_state.is_mission_running and world_state.number_of_observations_since_last_state > 0:
msg = world_state.observations[-1].text
ob = json.loads(msg)
if "XPos" in ob and "ZPos" in ob:
current_pos[i] = (ob[u'XPos'], ob[u'ZPos'])
#print("Enemy updated pos ", current_pos[i])
eCurr['x'] = current_pos[i][0]
eCurr['z'] = current_pos[i][1]
if (current_pos[i] == (pCurr['x'], pCurr['z'])):
g_score -= 100
timed_out = True
break
if ((current_pos[i][0] - 0.5, current_pos[i][1] - 0.5) == selected_dest):
print("Enemy reached destination!")
dest_reached= True
g_score -=30
break
time.sleep(0.1)
# Handles agent movement
if ob['Name'] == 'Player':
if "XPos" in ob and "ZPos" in ob:
current_pos[i] = (ob[u'XPos'], ob[u'ZPos'])
#print("Agent initial pos ", current_pos[i])
if (current_pos[i] == (eCurr['x'], eCurr['z'])):
g_score -= 100
timed_out = True
break
print('agent moving')
dest_scores=practice.agentMove(ah, current_pos[i], world_state, possible_dests, (eCurr['x'], eCurr['z']),dest_scores)
ah = agent_hosts[i]
world_state = ah.getWorldState()
if world_state.is_mission_running and world_state.number_of_observations_since_last_state > 0:
msg = world_state.observations[-1].text
ob = json.loads(msg)
if "XPos" in ob and "ZPos" in ob:
current_pos[i] = (ob[u'XPos'], ob[u'ZPos'])
#print("Agent updated pos ", current_pos[i])
if ((current_pos[i][0] - 0.5, current_pos[i][1] - 0.5) == selected_dest):
#print("Agent reached destination!")
dest_reached= True
g_score += 50
break
if (current_pos[i] == (eCurr['x'], eCurr['z'])):
g_score -= 100
timed_out = True
break
#g_score -= 1
pCurr['x'] = current_pos[i][0]
pCurr['z'] = current_pos[i][1]
if((pCurr['x']*10)%5 != 0 or (pCurr['z']*10)%5 != 0 or (eCurr['x']*10)%5 != 0 or (eCurr['z']*10)%5 != 0):
print(pCurr['x'], " ", pCurr['z'])
g_score -= 100
timed_out = True
print("TIMED OUT")
break
time.sleep(0.05)
print(g_score)
print("Waiting for mission to end ", end=' ')
# Mission should have ended already, but we want to wait until all the various agent hosts
# have had a chance to respond to their mission ended message.
hasEnded = False # flips to True once every agent host reports the mission has ended
while not hasEnded:
hasEnded = True # assume all good
print(".", end="")
time.sleep(0.1)
for ah in agent_hosts:
world_state = ah.getWorldState()
if world_state.is_mission_running:
hasEnded = False # all not good
time.sleep(2) | [
"[email protected]"
] | |
1562a42c83d7480a67c631a9a7b097f839980268 | 71c331e4b1e00fa3be03b7f711fcb05a793cf2af | /QA-System-master/SpeechToText_test/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/storage/v1/storage_v1_messages.py | d1d192d4db3de2568479ea3d6ec6356cf43099a3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | iofh/QA-System | 568228bb0c0adf9ec23b45cd144d61049e720002 | af4a8f1b5f442ddf4905740ae49ed23d69afb0f6 | refs/heads/master | 2022-11-27T23:04:16.385021 | 2020-08-12T10:11:44 | 2020-08-12T10:11:44 | 286,980,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138,678 | py | """Generated message classes for storage version v1.
Stores and retrieves potentially large, immutable data objects.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import message_types as _message_types
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'storage'
class Bucket(_messages.Message):
r"""A bucket.
Messages:
BillingValue: The bucket's billing configuration.
CorsValueListEntry: A CorsValueListEntry object.
EncryptionValue: Encryption configuration for a bucket.
IamConfigurationValue: The bucket's IAM configuration.
LabelsValue: User-provided labels, in key/value pairs.
LifecycleValue: The bucket's lifecycle configuration. See lifecycle
management for more information.
LoggingValue: The bucket's logging configuration, which defines the
destination bucket and optional name prefix for the current bucket's
logs.
OwnerValue: The owner of the bucket. This is always the project team's
owner group.
RetentionPolicyValue: The bucket's retention policy. The retention policy
enforces a minimum retention time for all objects contained in the
bucket, based on their creation time. Any attempt to overwrite or delete
objects younger than the retention period will result in a
PERMISSION_DENIED error. An unlocked retention policy can be modified or
removed from the bucket via a storage.buckets.update operation. A locked
retention policy cannot be removed or shortened in duration for the
lifetime of the bucket. Attempting to remove or decrease period of a
locked retention policy will result in a PERMISSION_DENIED error.
VersioningValue: The bucket's versioning configuration.
WebsiteValue: The bucket's website configuration, controlling how the
service behaves when accessing bucket contents as a web site. See the
Static Website Examples for more information.
Fields:
acl: Access controls on the bucket.
billing: The bucket's billing configuration.
cors: The bucket's Cross-Origin Resource Sharing (CORS) configuration.
defaultEventBasedHold: The default value for event-based hold on newly
created objects in this bucket. Event-based hold is a way to retain
objects indefinitely until an event occurs, signified by the hold's
release. After being released, such objects will be subject to bucket-
level retention (if any). One sample use case of this flag is for banks
to hold loan documents for at least 3 years after loan is paid in full.
Here, bucket-level retention is 3 years and the event is loan being paid
in full. In this example, these objects will be held intact for any
number of years until the event has occurred (event-based hold on the
object is released) and then 3 more years after that. That means
retention duration of the objects begins from the moment event-based
hold transitioned from true to false. Objects under event-based hold
cannot be deleted, overwritten or archived until the hold is removed.
defaultObjectAcl: Default access controls to apply to new objects when no
ACL is provided.
encryption: Encryption configuration for a bucket.
etag: HTTP 1.1 Entity tag for the bucket.
iamConfiguration: The bucket's IAM configuration.
id: The ID of the bucket. For buckets, the id and name properties are the
same.
kind: The kind of item this is. For buckets, this is always
storage#bucket.
labels: User-provided labels, in key/value pairs.
lifecycle: The bucket's lifecycle configuration. See lifecycle management
for more information.
location: The location of the bucket. Object data for objects in the
bucket resides in physical storage within this region. Defaults to US.
See the developer's guide for the authoritative list.
locationType: The type of the bucket location.
logging: The bucket's logging configuration, which defines the destination
bucket and optional name prefix for the current bucket's logs.
metageneration: The metadata generation of this bucket.
name: The name of the bucket.
owner: The owner of the bucket. This is always the project team's owner
group.
projectNumber: The project number of the project the bucket belongs to.
retentionPolicy: The bucket's retention policy. The retention policy
enforces a minimum retention time for all objects contained in the
bucket, based on their creation time. Any attempt to overwrite or delete
objects younger than the retention period will result in a
PERMISSION_DENIED error. An unlocked retention policy can be modified or
removed from the bucket via a storage.buckets.update operation. A locked
retention policy cannot be removed or shortened in duration for the
lifetime of the bucket. Attempting to remove or decrease period of a
locked retention policy will result in a PERMISSION_DENIED error.
selfLink: The URI of this bucket.
storageClass: The bucket's default storage class, used whenever no
storageClass is specified for a newly-created object. This defines how
objects in the bucket are stored and determines the SLA and the cost of
storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE,
COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is
not specified when the bucket is created, it will default to STANDARD.
For more information, see storage classes.
timeCreated: The creation time of the bucket in RFC 3339 format.
updated: The modification time of the bucket in RFC 3339 format.
versioning: The bucket's versioning configuration.
website: The bucket's website configuration, controlling how the service
behaves when accessing bucket contents as a web site. See the Static
Website Examples for more information.
zoneAffinity: The zone or zones from which the bucket is intended to use
zonal quota. Requests for data from outside the specified affinities are
still allowed but won't be able to use zonal quota. The zone or zones
need to be within the bucket location otherwise the requests will fail
with a 400 Bad Request response.
zoneSeparation: If set, objects placed in this bucket are required to be
separated by disaster domain.
"""
class BillingValue(_messages.Message):
r"""The bucket's billing configuration.
Fields:
requesterPays: When set to true, Requester Pays is enabled for this
bucket.
"""
requesterPays = _messages.BooleanField(1)
class CorsValueListEntry(_messages.Message):
r"""A CorsValueListEntry object.
Fields:
maxAgeSeconds: The value, in seconds, to return in the Access-Control-
Max-Age header used in preflight responses.
method: The list of HTTP methods on which to include CORS response
headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list
of methods, and means "any method".
origin: The list of Origins eligible to receive CORS response headers.
Note: "*" is permitted in the list of origins, and means "any Origin".
responseHeader: The list of HTTP headers other than the simple response
headers to give permission for the user-agent to share across domains.
"""
maxAgeSeconds = _messages.IntegerField(1, variant=_messages.Variant.INT32)
method = _messages.StringField(2, repeated=True)
origin = _messages.StringField(3, repeated=True)
responseHeader = _messages.StringField(4, repeated=True)
class EncryptionValue(_messages.Message):
r"""Encryption configuration for a bucket.
Fields:
defaultKmsKeyName: A Cloud KMS key that will be used to encrypt objects
inserted into this bucket, if no encryption method is specified.
"""
defaultKmsKeyName = _messages.StringField(1)
class IamConfigurationValue(_messages.Message):
r"""The bucket's IAM configuration.
Messages:
BucketPolicyOnlyValue: The bucket's uniform bucket-level access
configuration. The feature was formerly known as Bucket Policy Only.
For backward compatibility, this field will be populated with
identical information as the uniformBucketLevelAccess field. We
recommend using the uniformBucketLevelAccess field to enable and
disable the feature.
UniformBucketLevelAccessValue: The bucket's uniform bucket-level access
configuration.
Fields:
bucketPolicyOnly: The bucket's uniform bucket-level access
configuration. The feature was formerly known as Bucket Policy Only.
For backward compatibility, this field will be populated with
identical information as the uniformBucketLevelAccess field. We
recommend using the uniformBucketLevelAccess field to enable and
disable the feature.
uniformBucketLevelAccess: The bucket's uniform bucket-level access
configuration.
"""
class BucketPolicyOnlyValue(_messages.Message):
r"""The bucket's uniform bucket-level access configuration. The feature
was formerly known as Bucket Policy Only. For backward compatibility,
this field will be populated with identical information as the
uniformBucketLevelAccess field. We recommend using the
uniformBucketLevelAccess field to enable and disable the feature.
Fields:
enabled: If set, access is controlled only by bucket-level or above
IAM policies.
lockedTime: The deadline for changing
iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC
3339 format. iamConfiguration.bucketPolicyOnly.enabled may be
changed from true to false until the locked time, after which the
field is immutable.
"""
enabled = _messages.BooleanField(1)
lockedTime = _message_types.DateTimeField(2)
class UniformBucketLevelAccessValue(_messages.Message):
r"""The bucket's uniform bucket-level access configuration.
Fields:
enabled: If set, access is controlled only by bucket-level or above
IAM policies.
lockedTime: The deadline for changing
iamConfiguration.uniformBucketLevelAccess.enabled from true to false
in RFC 3339 format.
iamConfiguration.uniformBucketLevelAccess.enabled may be changed
from true to false until the locked time, after which the field is
immutable.
"""
enabled = _messages.BooleanField(1)
lockedTime = _message_types.DateTimeField(2)
bucketPolicyOnly = _messages.MessageField('BucketPolicyOnlyValue', 1)
uniformBucketLevelAccess = _messages.MessageField('UniformBucketLevelAccessValue', 2)
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""User-provided labels, in key/value pairs.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: An individual label entry.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
class LifecycleValue(_messages.Message):
r"""The bucket's lifecycle configuration. See lifecycle management for
more information.
Messages:
RuleValueListEntry: A RuleValueListEntry object.
Fields:
rule: A lifecycle management rule, which is made of an action to take
and the condition(s) under which the action will be taken.
"""
class RuleValueListEntry(_messages.Message):
r"""A RuleValueListEntry object.
Messages:
ActionValue: The action to take.
ConditionValue: The condition(s) under which the action will be taken.
Fields:
action: The action to take.
condition: The condition(s) under which the action will be taken.
"""
class ActionValue(_messages.Message):
r"""The action to take.
Fields:
storageClass: Target storage class. Required iff the type of the
action is SetStorageClass.
type: Type of the action. Currently, only Delete and SetStorageClass
are supported.
"""
storageClass = _messages.StringField(1)
type = _messages.StringField(2)
class ConditionValue(_messages.Message):
r"""The condition(s) under which the action will be taken.
Fields:
age: Age of an object (in days). This condition is satisfied when an
object reaches the specified age.
createdBefore: A date in RFC 3339 format with only the date part
(for instance, "2013-01-15"). This condition is satisfied when an
object is created before midnight of the specified date in UTC.
customTimeBefore: A timestamp in RFC 3339 format. This condition is
satisfied when the custom time on an object is before this
timestamp.
daysSinceCustomTime: Number of days elapsed since the user-specified
timestamp set on an object. The condition is satisfied if the days
elapsed is at least this number. If no custom timestamp is
specified on an object, the condition does not apply.
daysSinceNoncurrentTime: Number of days elapsed since the noncurrent
timestamp of an object. The condition is satisfied if the days
elapsed is at least this number. This condition is relevant only
for versioned objects. The value of the field must be a
nonnegative integer. If it's zero, the object version will become
eligible for Lifecycle action as soon as it becomes noncurrent.
isLive: Relevant only for versioned objects. If the value is true,
this condition matches live objects; if the value is false, it
matches archived objects.
matchesPattern: A regular expression that satisfies the RE2 syntax.
This condition is satisfied when the name of the object matches
the RE2 pattern. Note: This feature is currently in the "Early
Access" launch stage and is only available to a whitelisted set of
users; that means that this feature may be changed in backward-
incompatible ways and that it is not guaranteed to be released.
matchesStorageClass: Objects having any of the storage classes
specified by this condition will be matched. Values include
MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD,
and DURABLE_REDUCED_AVAILABILITY.
noncurrentTimeBefore: A timestamp in RFC 3339 format. This condition
is satisfied when the noncurrent time on an object is before this
timestamp. This condition is relevant only for versioned objects.
numNewerVersions: Relevant only for versioned objects. If the value
is N, this condition is satisfied when there are at least N
versions (including the live version) newer than this version of
the object.
"""
age = _messages.IntegerField(1, variant=_messages.Variant.INT32)
createdBefore = extra_types.DateField(2)
customTimeBefore = _message_types.DateTimeField(3)
daysSinceCustomTime = _messages.IntegerField(4, variant=_messages.Variant.INT32)
daysSinceNoncurrentTime = _messages.IntegerField(5, variant=_messages.Variant.INT32)
isLive = _messages.BooleanField(6)
matchesPattern = _messages.StringField(7)
matchesStorageClass = _messages.StringField(8, repeated=True)
noncurrentTimeBefore = _message_types.DateTimeField(9)
numNewerVersions = _messages.IntegerField(10, variant=_messages.Variant.INT32)
action = _messages.MessageField('ActionValue', 1)
condition = _messages.MessageField('ConditionValue', 2)
rule = _messages.MessageField('RuleValueListEntry', 1, repeated=True)
class LoggingValue(_messages.Message):
r"""The bucket's logging configuration, which defines the destination
bucket and optional name prefix for the current bucket's logs.
Fields:
logBucket: The destination bucket where the current bucket's logs should
be placed.
logObjectPrefix: A prefix for log object names.
"""
logBucket = _messages.StringField(1)
logObjectPrefix = _messages.StringField(2)
class OwnerValue(_messages.Message):
r"""The owner of the bucket. This is always the project team's owner
group.
Fields:
entity: The entity, in the form project-owner-projectId.
entityId: The ID for the entity.
"""
entity = _messages.StringField(1)
entityId = _messages.StringField(2)
class RetentionPolicyValue(_messages.Message):
r"""The bucket's retention policy. The retention policy enforces a minimum
retention time for all objects contained in the bucket, based on their
creation time. Any attempt to overwrite or delete objects younger than the
retention period will result in a PERMISSION_DENIED error. An unlocked
retention policy can be modified or removed from the bucket via a
storage.buckets.update operation. A locked retention policy cannot be
removed or shortened in duration for the lifetime of the bucket.
Attempting to remove or decrease period of a locked retention policy will
result in a PERMISSION_DENIED error.
Fields:
effectiveTime: Server-determined value that indicates the time from
which policy was enforced and effective. This value is in RFC 3339
format.
isLocked: Once locked, an object retention policy cannot be modified.
retentionPeriod: The duration in seconds that objects need to be
retained. Retention duration must be greater than zero and less than
100 years. Note that enforcement of retention periods less than a day
is not guaranteed. Such periods should only be used for testing
purposes.
"""
effectiveTime = _message_types.DateTimeField(1)
isLocked = _messages.BooleanField(2)
retentionPeriod = _messages.IntegerField(3)
class VersioningValue(_messages.Message):
r"""The bucket's versioning configuration.
Fields:
enabled: While set to true, versioning is fully enabled for this bucket.
"""
enabled = _messages.BooleanField(1)
class WebsiteValue(_messages.Message):
r"""The bucket's website configuration, controlling how the service
behaves when accessing bucket contents as a web site. See the Static
Website Examples for more information.
Fields:
mainPageSuffix: If the requested object path is missing, the service
will ensure the path has a trailing '/', append this suffix, and
attempt to retrieve the resulting object. This allows the creation of
index.html objects to represent directory pages.
notFoundPage: If the requested object path is missing, and any
mainPageSuffix object is missing, if applicable, the service will
return the named object from this bucket as the content for a 404 Not
Found result.
"""
mainPageSuffix = _messages.StringField(1)
notFoundPage = _messages.StringField(2)
acl = _messages.MessageField('BucketAccessControl', 1, repeated=True)
billing = _messages.MessageField('BillingValue', 2)
cors = _messages.MessageField('CorsValueListEntry', 3, repeated=True)
defaultEventBasedHold = _messages.BooleanField(4)
defaultObjectAcl = _messages.MessageField('ObjectAccessControl', 5, repeated=True)
encryption = _messages.MessageField('EncryptionValue', 6)
etag = _messages.StringField(7)
iamConfiguration = _messages.MessageField('IamConfigurationValue', 8)
id = _messages.StringField(9)
kind = _messages.StringField(10, default='storage#bucket')
labels = _messages.MessageField('LabelsValue', 11)
lifecycle = _messages.MessageField('LifecycleValue', 12)
location = _messages.StringField(13)
locationType = _messages.StringField(14)
logging = _messages.MessageField('LoggingValue', 15)
metageneration = _messages.IntegerField(16)
name = _messages.StringField(17)
owner = _messages.MessageField('OwnerValue', 18)
projectNumber = _messages.IntegerField(19, variant=_messages.Variant.UINT64)
retentionPolicy = _messages.MessageField('RetentionPolicyValue', 20)
selfLink = _messages.StringField(21)
storageClass = _messages.StringField(22)
timeCreated = _message_types.DateTimeField(23)
updated = _message_types.DateTimeField(24)
versioning = _messages.MessageField('VersioningValue', 25)
website = _messages.MessageField('WebsiteValue', 26)
zoneAffinity = _messages.StringField(27, repeated=True)
zoneSeparation = _messages.BooleanField(28)
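
# Illustrative sketch (not part of the generated API surface): constructing a
# Bucket message with versioning enabled and a one-day retention policy. The
# bucket name and retention period below are hypothetical placeholder values.
def _example_bucket():
  return Bucket(
      name='example-bucket',  # hypothetical bucket name
      versioning=Bucket.VersioningValue(enabled=True),
      retentionPolicy=Bucket.RetentionPolicyValue(
          retentionPeriod=86400))  # one day, in seconds
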
class BucketAccessControl(_messages.Message):
r"""An access-control entry.
Messages:
ProjectTeamValue: The project team associated with the entity, if any.
Fields:
bucket: The name of the bucket.
domain: The domain associated with the entity, if any.
email: The email address associated with the entity, if any.
entity: The entity holding the permission, in one of the following forms:
  - user-userId
  - user-email
  - group-groupId
  - group-email
  - domain-domain
  - project-team-projectId
  - allUsers
  - allAuthenticatedUsers
  Examples:
  - The user [email protected] would be [email protected].
  - The group [email protected] would be [email protected].
  - To refer to all members of the Google Apps for Business domain
    example.com, the entity would be domain-example.com.
entityId: The ID for the entity, if any.
etag: HTTP 1.1 Entity tag for the access-control entry.
id: The ID of the access-control entry.
kind: The kind of item this is. For bucket access control entries, this is
always storage#bucketAccessControl.
projectTeam: The project team associated with the entity, if any.
role: The access permission for the entity.
selfLink: The link to this access-control entry.
"""
class ProjectTeamValue(_messages.Message):
r"""The project team associated with the entity, if any.
Fields:
projectNumber: The project number.
team: The team.
"""
projectNumber = _messages.StringField(1)
team = _messages.StringField(2)
bucket = _messages.StringField(1)
domain = _messages.StringField(2)
email = _messages.StringField(3)
entity = _messages.StringField(4)
entityId = _messages.StringField(5)
etag = _messages.StringField(6)
id = _messages.StringField(7)
kind = _messages.StringField(8, default='storage#bucketAccessControl')
projectTeam = _messages.MessageField('ProjectTeamValue', 9)
role = _messages.StringField(10)
selfLink = _messages.StringField(11)
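
# Illustrative sketch: a BucketAccessControl entry granting READER access to
# a hypothetical Google group. Bucket name and group email are placeholders.
def _example_bucket_acl_entry():
  return BucketAccessControl(
      bucket='example-bucket',
      entity='[email protected]',
      role='READER')
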
class BucketAccessControls(_messages.Message):
r"""An access-control list.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of bucket access control
entries, this is always storage#bucketAccessControls.
"""
items = _messages.MessageField('BucketAccessControl', 1, repeated=True)
kind = _messages.StringField(2, default='storage#bucketAccessControls')
class Buckets(_messages.Message):
r"""A list of buckets.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of buckets, this is always
storage#buckets.
nextPageToken: The continuation token, used to page through large result
sets. Provide this value in a subsequent request to return the next page
of results.
"""
items = _messages.MessageField('Bucket', 1, repeated=True)
kind = _messages.StringField(2, default='storage#buckets')
nextPageToken = _messages.StringField(3)
class Channel(_messages.Message):
r"""An notification channel used to watch for resource changes.
Messages:
ParamsValue: Additional parameters controlling delivery channel behavior.
Optional.
Fields:
address: The address where notifications are delivered for this channel.
expiration: Date and time of notification channel expiration, expressed as
a Unix timestamp, in milliseconds. Optional.
id: A UUID or similar unique string that identifies this channel.
kind: Identifies this as a notification channel used to watch for changes
to a resource, which is "api#channel".
params: Additional parameters controlling delivery channel behavior.
Optional.
payload: A Boolean value to indicate whether payload is wanted. Optional.
resourceId: An opaque ID that identifies the resource being watched on
this channel. Stable across different API versions.
resourceUri: A version-specific identifier for the watched resource.
token: An arbitrary string delivered to the target address with each
notification delivered over this channel. Optional.
type: The type of delivery mechanism used for this channel.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParamsValue(_messages.Message):
r"""Additional parameters controlling delivery channel behavior. Optional.
Messages:
AdditionalProperty: An additional property for a ParamsValue object.
Fields:
additionalProperties: Declares a new parameter by name.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ParamsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
address = _messages.StringField(1)
expiration = _messages.IntegerField(2)
id = _messages.StringField(3)
kind = _messages.StringField(4, default='api#channel')
params = _messages.MessageField('ParamsValue', 5)
payload = _messages.BooleanField(6)
resourceId = _messages.StringField(7)
resourceUri = _messages.StringField(8)
token = _messages.StringField(9)
type = _messages.StringField(10)
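
# Illustrative sketch: a Channel message as it might be filled in for a watch
# request. The id is a caller-chosen UUID and the address is a hypothetical
# HTTPS endpoint; 'web_hook' follows the documented delivery mechanism name,
# but treat the exact values here as assumptions.
def _example_watch_channel():
  return Channel(
      id='01234567-89ab-cdef-0123-456789abcdef',  # caller-chosen UUID
      type='web_hook',
      address='https://example.com/notifications')
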
class Channels(_messages.Message):
r"""A channels list response.
Messages:
ItemsValueListEntry: An ItemsValueListEntry object.
Fields:
items: The list of notification channels for a bucket.
kind: The kind of item this is.
"""
class ItemsValueListEntry(_messages.Message):
r"""A ItemsValueListEntry object.
Fields:
channel_id: User-specified name for a channel. Needed to unsubscribe.
creation_time_ms: 64-bit Unix timestamp in ms of when the channel was
created.
push_url: URL used to identify where notifications are sent.
resource_id: Opaque value generated by GCS representing a bucket. Needed
to unsubscribe.
subscriber_email: Email address of the subscriber.
"""
channel_id = _messages.StringField(1)
creation_time_ms = _messages.IntegerField(2)
push_url = _messages.StringField(3)
resource_id = _messages.StringField(4)
subscriber_email = _messages.StringField(5)
items = _messages.MessageField('ItemsValueListEntry', 1, repeated=True)
kind = _messages.StringField(2, default='storage#channels')
class ComposeRequest(_messages.Message):
r"""A Compose request.
Messages:
SourceObjectsValueListEntry: A SourceObjectsValueListEntry object.
Fields:
destination: Properties of the resulting object.
kind: The kind of item this is.
sourceObjects: The list of source objects that will be concatenated into a
single object.
"""
class SourceObjectsValueListEntry(_messages.Message):
r"""A SourceObjectsValueListEntry object.
Messages:
ObjectPreconditionsValue: Conditions that must be met for this operation
to execute.
Fields:
generation: The generation of this object to use as the source.
name: The source object's name. All source objects must reside in the
same bucket.
objectPreconditions: Conditions that must be met for this operation to
execute.
"""
class ObjectPreconditionsValue(_messages.Message):
r"""Conditions that must be met for this operation to execute.
Fields:
ifGenerationMatch: Only perform the composition if the generation of
the source object that would be used matches this value. If this
value and a generation are both specified, they must be the same
value or the call will fail.
"""
ifGenerationMatch = _messages.IntegerField(1)
generation = _messages.IntegerField(1)
name = _messages.StringField(2)
objectPreconditions = _messages.MessageField('ObjectPreconditionsValue', 3)
destination = _messages.MessageField('Object', 1)
kind = _messages.StringField(2, default='storage#composeRequest')
sourceObjects = _messages.MessageField('SourceObjectsValueListEntry', 3, repeated=True)
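
# Illustrative sketch: a ComposeRequest that concatenates two hypothetical
# source objects into one destination object, with a generation precondition
# on the second source. All names and the generation number are placeholders.
def _example_compose_request():
  source = ComposeRequest.SourceObjectsValueListEntry
  return ComposeRequest(
      destination=Object(bucket='example-bucket', name='combined.txt',
                         contentType='text/plain'),
      sourceObjects=[
          source(name='part-1.txt'),
          source(name='part-2.txt',
                 objectPreconditions=source.ObjectPreconditionsValue(
                     ifGenerationMatch=1234567890)),
      ])
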
class Expr(_messages.Message):
r"""Represents an expression text. Example: title: "User account presence"
description: "Determines whether the request has a user account" expression:
"size(request.user) > 0"
Fields:
description: An optional description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax. The application context of the containing message
determines which well-known feature set of CEL is supported.
location: An optional string indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: An optional title for the expression, i.e. a short string
describing its purpose. This can be used e.g. in UIs which allow to
enter the expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
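
# Illustrative sketch: an Expr carrying a CEL condition, as might be attached
# to an IAM policy binding. The title and expression are hypothetical.
def _example_iam_condition():
  return Expr(
      title='Expires end of 2024',
      expression='request.time < timestamp("2025-01-01T00:00:00Z")')
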
class HmacKey(_messages.Message):
r"""JSON template to produce a JSON-style HMAC Key resource for Create
responses.
Fields:
kind: The kind of item this is. For HMAC keys, this is always
storage#hmacKey.
metadata: Key metadata.
secret: HMAC secret key material.
"""
kind = _messages.StringField(1, default='storage#hmacKey')
metadata = _messages.MessageField('HmacKeyMetadata', 2)
secret = _messages.StringField(3)
class HmacKeyMetadata(_messages.Message):
r"""JSON template to produce a JSON-style HMAC Key metadata resource.
Fields:
accessId: The ID of the HMAC Key.
etag: HTTP 1.1 Entity tag for the HMAC key.
id: The ID of the HMAC key, including the Project ID and the Access ID.
kind: The kind of item this is. For HMAC Key metadata, this is always
storage#hmacKeyMetadata.
projectId: Project ID owning the service account to which the key
authenticates.
selfLink: The link to this resource.
serviceAccountEmail: The email address of the key's associated service
account.
state: The state of the key. Can be one of ACTIVE, INACTIVE, or DELETED.
timeCreated: The creation time of the HMAC key in RFC 3339 format.
updated: The last modification time of the HMAC key metadata in RFC 3339
format.
"""
accessId = _messages.StringField(1)
etag = _messages.StringField(2)
id = _messages.StringField(3)
kind = _messages.StringField(4, default='storage#hmacKeyMetadata')
projectId = _messages.StringField(5)
selfLink = _messages.StringField(6)
serviceAccountEmail = _messages.StringField(7)
state = _messages.StringField(8)
timeCreated = _message_types.DateTimeField(9)
updated = _message_types.DateTimeField(10)
class HmacKeysMetadata(_messages.Message):
r"""A list of hmacKeys.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of hmacKeys, this is always
storage#hmacKeysMetadata.
nextPageToken: The continuation token, used to page through large result
sets. Provide this value in a subsequent request to return the next page
of results.
"""
items = _messages.MessageField('HmacKeyMetadata', 1, repeated=True)
kind = _messages.StringField(2, default='storage#hmacKeysMetadata')
nextPageToken = _messages.StringField(3)
class Notification(_messages.Message):
r"""A subscription to receive Google PubSub notifications.
Messages:
CustomAttributesValue: An optional list of additional attributes to attach
to each Cloud PubSub message published for this notification
subscription.
Fields:
custom_attributes: An optional list of additional attributes to attach to
each Cloud PubSub message published for this notification subscription.
etag: HTTP 1.1 Entity tag for this subscription notification.
event_types: If present, only send notifications about listed event types.
If empty, send notifications for all event types.
id: The ID of the notification.
kind: The kind of item this is. For notifications, this is always
storage#notification.
object_name_prefix: If present, only apply this notification configuration
to object names that begin with this prefix.
payload_format: The desired content of the Payload.
selfLink: The canonical URL of this notification.
topic: The Cloud PubSub topic to which this subscription publishes.
Formatted as: '//pubsub.googleapis.com/projects/{project-
identifier}/topics/{my-topic}'
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class CustomAttributesValue(_messages.Message):
r"""An optional list of additional attributes to attach to each Cloud
PubSub message published for this notification subscription.
Messages:
AdditionalProperty: An additional property for a CustomAttributesValue
object.
Fields:
additionalProperties: Additional properties of type
CustomAttributesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a CustomAttributesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
custom_attributes = _messages.MessageField('CustomAttributesValue', 1)
etag = _messages.StringField(2)
event_types = _messages.StringField(3, repeated=True)
id = _messages.StringField(4)
kind = _messages.StringField(5, default='storage#notification')
object_name_prefix = _messages.StringField(6)
payload_format = _messages.StringField(7, default='JSON_API_V1')
selfLink = _messages.StringField(8)
topic = _messages.StringField(9)
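
# Illustrative sketch: a Notification subscription publishing object finalize
# and delete events for a hypothetical prefix to a hypothetical Pub/Sub topic.
def _example_notification():
  return Notification(
      topic='//pubsub.googleapis.com/projects/example-project/topics/gcs-events',
      payload_format='JSON_API_V1',
      event_types=['OBJECT_FINALIZE', 'OBJECT_DELETE'],
      object_name_prefix='logs/')
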
class Notifications(_messages.Message):
r"""A list of notification subscriptions.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of notifications, this is always
storage#notifications.
"""
items = _messages.MessageField('Notification', 1, repeated=True)
kind = _messages.StringField(2, default='storage#notifications')
class Object(_messages.Message):
r"""An object.
Messages:
CustomerEncryptionValue: Metadata of customer-supplied encryption key, if
the object is encrypted by such a key.
MetadataValue: User-provided metadata, in key/value pairs.
OwnerValue: The owner of the object. This will always be the uploader of
the object.
Fields:
acl: Access controls on the object.
bucket: The name of the bucket containing this object.
cacheControl: Cache-Control directive for the object data. If omitted, and
the object is accessible to all anonymous users, the default will be
public, max-age=3600.
componentCount: Number of underlying components that make up this object.
Components are accumulated by compose operations.
contentDisposition: Content-Disposition of the object data.
contentEncoding: Content-Encoding of the object data.
contentLanguage: Content-Language of the object data.
contentType: Content-Type of the object data. If an object is stored
without a Content-Type, it is served as application/octet-stream.
crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded
using base64 in big-endian byte order. For more information about using
the CRC32c checksum, see Hashes and ETags: Best Practices.
customTime: A timestamp in RFC 3339 format specified by the user for an
object.
customerEncryption: Metadata of customer-supplied encryption key, if the
object is encrypted by such a key.
etag: HTTP 1.1 Entity tag for the object.
eventBasedHold: Whether an object is under event-based hold. Event-based
hold is a way to retain objects until an event occurs, which is
signified by the hold's release (i.e. this value is set to false). After
being released (set to false), such objects will be subject to bucket-
level retention (if any). One sample use case of this flag is for banks
to hold loan documents for at least 3 years after the loan is paid in full.
Here, bucket-level retention is 3 years and the event is the loan being
paid in full. In this example, these objects will be held intact for any
number of years until the event has occurred (event-based hold on the
object is released) and then 3 more years after that. That means
retention duration of the objects begins from the moment event-based
hold transitioned from true to false.
generation: The content generation of this object. Used for object
versioning.
id: The ID of the object, including the bucket name, object name, and
generation number.
kind: The kind of item this is. For objects, this is always
storage#object.
kmsKeyName: Cloud KMS Key used to encrypt this object, if the object is
encrypted by such a key.
md5Hash: MD5 hash of the data; encoded using base64. For more information
about using the MD5 hash, see Hashes and ETags: Best Practices.
mediaLink: Media download link.
metadata: User-provided metadata, in key/value pairs.
metageneration: The version of the metadata for this object at this
generation. Used for preconditions and for detecting changes in
metadata. A metageneration number is only meaningful in the context of a
particular generation of a particular object.
name: The name of the object. Required if not specified by URL parameter.
owner: The owner of the object. This will always be the uploader of the
object.
retentionExpirationTime: A server-determined value that specifies the
earliest time that the object's retention period expires. This value is
in RFC 3339 format. Note 1: This field is not provided for objects with
an active event-based hold, since retention expiration is unknown until
the hold is removed. Note 2: This value can be provided even when
temporary hold is set (so that the user can reason about policy without
having to first unset the temporary hold).
selfLink: The link to this object.
size: Content-Length of the data in bytes.
storageClass: Storage class of the object.
temporaryHold: Whether an object is under temporary hold. While this flag
is set to true, the object is protected against deletion and overwrites.
A common use case of this flag is regulatory investigations where
objects need to be retained while the investigation is ongoing. Note
that unlike event-based hold, temporary hold does not impact retention
expiration time of an object.
timeCreated: The creation time of the object in RFC 3339 format.
timeDeleted: The deletion time of the object in RFC 3339 format. Will be
returned if and only if this version of the object has been deleted.
timeStorageClassUpdated: The time at which the object's storage class was
last changed. When the object is initially created, it will be set to
timeCreated.
updated: The modification time of the object metadata in RFC 3339 format.
"""
class CustomerEncryptionValue(_messages.Message):
r"""Metadata of customer-supplied encryption key, if the object is
encrypted by such a key.
Fields:
encryptionAlgorithm: The encryption algorithm.
keySha256: SHA256 hash value of the encryption key.
"""
encryptionAlgorithm = _messages.StringField(1)
keySha256 = _messages.StringField(2)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""User-provided metadata, in key/value pairs.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: An individual metadata entry.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
class OwnerValue(_messages.Message):
r"""The owner of the object. This will always be the uploader of the
object.
Fields:
entity: The entity, in the form user-userId.
entityId: The ID for the entity.
"""
entity = _messages.StringField(1)
entityId = _messages.StringField(2)
acl = _messages.MessageField('ObjectAccessControl', 1, repeated=True)
bucket = _messages.StringField(2)
cacheControl = _messages.StringField(3)
componentCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
contentDisposition = _messages.StringField(5)
contentEncoding = _messages.StringField(6)
contentLanguage = _messages.StringField(7)
contentType = _messages.StringField(8)
crc32c = _messages.StringField(9)
customTime = _message_types.DateTimeField(10)
customerEncryption = _messages.MessageField('CustomerEncryptionValue', 11)
etag = _messages.StringField(12)
eventBasedHold = _messages.BooleanField(13)
generation = _messages.IntegerField(14)
id = _messages.StringField(15)
kind = _messages.StringField(16, default='storage#object')
kmsKeyName = _messages.StringField(17)
md5Hash = _messages.StringField(18)
mediaLink = _messages.StringField(19)
metadata = _messages.MessageField('MetadataValue', 20)
metageneration = _messages.IntegerField(21)
name = _messages.StringField(22)
owner = _messages.MessageField('OwnerValue', 23)
retentionExpirationTime = _message_types.DateTimeField(24)
selfLink = _messages.StringField(25)
size = _messages.IntegerField(26, variant=_messages.Variant.UINT64)
storageClass = _messages.StringField(27)
temporaryHold = _messages.BooleanField(28)
timeCreated = _message_types.DateTimeField(29)
timeDeleted = _message_types.DateTimeField(30)
timeStorageClassUpdated = _message_types.DateTimeField(31)
updated = _message_types.DateTimeField(32)
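
# Illustrative sketch: an Object message carrying user-provided metadata via
# the MetadataValue map type defined above. Names and values are placeholders.
def _example_object_with_metadata():
  metadata = Object.MetadataValue(additionalProperties=[
      Object.MetadataValue.AdditionalProperty(key='source', value='nightly-job')])
  return Object(
      bucket='example-bucket',
      name='data/report.csv',
      contentType='text/csv',
      metadata=metadata)
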
class ObjectAccessControl(_messages.Message):
r"""An access-control entry.
Messages:
ProjectTeamValue: The project team associated with the entity, if any.
Fields:
bucket: The name of the bucket.
domain: The domain associated with the entity, if any.
email: The email address associated with the entity, if any.
entity: The entity holding the permission, in one of the following forms:
  - user-userId
  - user-email
  - group-groupId
  - group-email
  - domain-domain
  - project-team-projectId
  - allUsers
  - allAuthenticatedUsers
  Examples:
  - The user [email protected] would be [email protected].
  - The group [email protected] would be [email protected].
  - To refer to all members of the Google Apps for Business domain
    example.com, the entity would be domain-example.com.
entityId: The ID for the entity, if any.
etag: HTTP 1.1 Entity tag for the access-control entry.
generation: The content generation of the object, if applied to an object.
id: The ID of the access-control entry.
kind: The kind of item this is. For object access control entries, this is
always storage#objectAccessControl.
object: The name of the object, if applied to an object.
projectTeam: The project team associated with the entity, if any.
role: The access permission for the entity.
selfLink: The link to this access-control entry.
"""
class ProjectTeamValue(_messages.Message):
r"""The project team associated with the entity, if any.
Fields:
projectNumber: The project number.
team: The team.
"""
projectNumber = _messages.StringField(1)
team = _messages.StringField(2)
bucket = _messages.StringField(1)
domain = _messages.StringField(2)
email = _messages.StringField(3)
entity = _messages.StringField(4)
entityId = _messages.StringField(5)
etag = _messages.StringField(6)
generation = _messages.IntegerField(7)
id = _messages.StringField(8)
kind = _messages.StringField(9, default='storage#objectAccessControl')
object = _messages.StringField(10)
projectTeam = _messages.MessageField('ProjectTeamValue', 11)
role = _messages.StringField(12)
selfLink = _messages.StringField(13)
class ObjectAccessControls(_messages.Message):
r"""An access-control list.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of object access control
entries, this is always storage#objectAccessControls.
"""
items = _messages.MessageField('ObjectAccessControl', 1, repeated=True)
kind = _messages.StringField(2, default='storage#objectAccessControls')
class Objects(_messages.Message):
r"""A list of objects.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of objects, this is always
storage#objects.
nextPageToken: The continuation token, used to page through large result
sets. Provide this value in a subsequent request to return the next page
of results.
prefixes: The list of prefixes of objects matching-but-not-listed up to
and including the requested delimiter.
"""
items = _messages.MessageField('Object', 1, repeated=True)
kind = _messages.StringField(2, default='storage#objects')
nextPageToken = _messages.StringField(3)
prefixes = _messages.StringField(4, repeated=True)
class Policy(_messages.Message):
r"""A bucket/object IAM policy.
Messages:
BindingsValueListEntry: A BindingsValueListEntry object.
Fields:
bindings: An association between a role, which comes with a set of
permissions, and members who may assume that role.
etag: HTTP 1.1 Entity tag for the policy.
kind: The kind of item this is. For policies, this is always
storage#policy. This field is ignored on input.
resourceId: The ID of the resource to which this policy belongs. Will be
of the form projects/_/buckets/bucket for buckets, and
projects/_/buckets/bucket/objects/object for objects. A specific
generation may be specified by appending #generationNumber to the end of
the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17.
The current generation can be denoted with #0. This field is ignored on
input.
version: The IAM policy format version.
"""
class BindingsValueListEntry(_messages.Message):
r"""A BindingsValueListEntry object.
Fields:
condition: The condition that is associated with this binding. NOTE: an
unsatisfied condition will not allow user access via current binding.
Different bindings, including their conditions, are examined
independently.
members: A collection of identifiers for members who may assume the
  provided role. Recognized identifiers are as follows:
  - allUsers - A special identifier that represents anyone on the
    internet, with or without a Google account.
  - allAuthenticatedUsers - A special identifier that represents anyone
    who is authenticated with a Google account or a service account.
  - user:emailid - An email address that represents a specific account.
    For example, user:[email protected] or user:[email protected].
  - serviceAccount:emailid - An email address that represents a service
    account. For example,
    serviceAccount:[email protected].
  - group:emailid - An email address that represents a Google group.
    For example, group:[email protected].
  - domain:domain - A Google Apps domain name that represents all the
    users of that domain. For example, domain:google.com or
    domain:example.com.
  - projectOwner:projectid - Owners of the given project. For example,
    projectOwner:my-example-project.
  - projectEditor:projectid - Editors of the given project. For
    example, projectEditor:my-example-project.
  - projectViewer:projectid - Viewers of the given project. For
    example, projectViewer:my-example-project.
role: The role to which members belong. Two types of roles are
  supported: new IAM roles, which grant permissions that do not map
  directly to those provided by ACLs, and legacy IAM roles, which do
  map directly to ACL permissions. All roles are of the format
  roles/storage.specificRole. The new IAM roles are:
  - roles/storage.admin - Full control of Google Cloud Storage
    resources.
  - roles/storage.objectViewer - Read-only access to Google Cloud
    Storage objects.
  - roles/storage.objectCreator - Access to create objects in Google
    Cloud Storage.
  - roles/storage.objectAdmin - Full control of Google Cloud Storage
    objects.
  The legacy IAM roles are:
  - roles/storage.legacyObjectReader - Read-only access to objects
    without listing. Equivalent to an ACL entry on an object with the
    READER role.
  - roles/storage.legacyObjectOwner - Read/write access to existing
    objects without listing. Equivalent to an ACL entry on an object
    with the OWNER role.
  - roles/storage.legacyBucketReader - Read access to buckets with
    object listing. Equivalent to an ACL entry on a bucket with the
    READER role.
  - roles/storage.legacyBucketWriter - Read access to buckets with
    object listing/creation/deletion. Equivalent to an ACL entry on a
    bucket with the WRITER role.
  - roles/storage.legacyBucketOwner - Read and write access to
    existing buckets with object listing/creation/deletion. Equivalent
    to an ACL entry on a bucket with the OWNER role.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
bindings = _messages.MessageField('BindingsValueListEntry', 1, repeated=True)
etag = _messages.BytesField(2)
kind = _messages.StringField(3, default='storage#policy')
resourceId = _messages.StringField(4)
version = _messages.IntegerField(5, variant=_messages.Variant.INT32)
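
# Illustrative sketch: a bucket IAM Policy with a single binding that grants
# roles/storage.objectViewer to allUsers (i.e. public read of objects).
def _example_public_read_policy():
  binding = Policy.BindingsValueListEntry(
      role='roles/storage.objectViewer',
      members=['allUsers'])
  return Policy(bindings=[binding], version=1)
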
class RewriteResponse(_messages.Message):
r"""A rewrite response.
Fields:
done: true if the copy is finished; false if the copy is still in
progress. This property is always present in the response.
kind: The kind of item this is.
objectSize: The total size of the object being copied in bytes. This
property is always present in the response.
resource: A resource containing the metadata for the copied-to object.
This property is present in the response only when copying completes.
rewriteToken: A token to use in subsequent requests to continue copying
data. This token is present in the response only when there is more data
to copy.
totalBytesRewritten: The total bytes written so far, which can be used to
provide a waiting user with a progress indicator. This property is
always present in the response.
"""
done = _messages.BooleanField(1)
kind = _messages.StringField(2, default='storage#rewriteResponse')
objectSize = _messages.IntegerField(3)
resource = _messages.MessageField('Object', 4)
rewriteToken = _messages.StringField(5)
totalBytesRewritten = _messages.IntegerField(6)
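
# Illustrative sketch of driving a multi-request rewrite with rewriteToken.
# `issue_rewrite` stands in for whatever client call performs objects.rewrite;
# it is a hypothetical callable, not part of this module.
def _example_rewrite_loop(issue_rewrite):
  response = issue_rewrite(rewrite_token=None)  # first request: no token
  while not response.done:
    # Resume copying from where the previous request left off.
    response = issue_rewrite(rewrite_token=response.rewriteToken)
  return response.resource  # metadata of the copied-to object
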
class ServiceAccount(_messages.Message):
r"""A subscription to receive Google PubSub notifications.
Fields:
email_address: The ID of the notification.
kind: The kind of item this is. For notifications, this is always
storage#notification.
"""
email_address = _messages.StringField(1)
kind = _messages.StringField(2, default='storage#serviceAccount')
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: An opaque string that represents a user for quota purposes.
Must not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in API
requests.
userIp: Deprecated. Please use quotaUser instead.
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default='json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
class StorageBucketAccessControlsDeleteRequest(_messages.Message):
r"""A StorageBucketAccessControlsDeleteRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketAccessControlsDeleteResponse(_messages.Message):
r"""An empty StorageBucketAccessControlsDelete response."""
class StorageBucketAccessControlsGetRequest(_messages.Message):
r"""A StorageBucketAccessControlsGetRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketAccessControlsInsertRequest(_messages.Message):
r"""A StorageBucketAccessControlsInsertRequest object.
Fields:
bucket: Name of a bucket.
bucketAccessControl: A BucketAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
bucketAccessControl = _messages.MessageField('BucketAccessControl', 2)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketAccessControlsListRequest(_messages.Message):
r"""A StorageBucketAccessControlsListRequest object.
Fields:
bucket: Name of a bucket.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
provisionalUserProject = _messages.StringField(2)
userProject = _messages.StringField(3)
class StorageBucketAccessControlsPatchRequest(_messages.Message):
r"""A StorageBucketAccessControlsPatchRequest object.
Fields:
bucket: Name of a bucket.
bucketAccessControl: A BucketAccessControl resource to be passed as the
request body.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
bucketAccessControl = _messages.MessageField('BucketAccessControl', 2)
entity = _messages.StringField(3, required=True)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageBucketAccessControlsUpdateRequest(_messages.Message):
r"""A StorageBucketAccessControlsUpdateRequest object.
Fields:
bucket: Name of a bucket.
bucketAccessControl: A BucketAccessControl resource to be passed as the
request body.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
bucketAccessControl = _messages.MessageField('BucketAccessControl', 2)
entity = _messages.StringField(3, required=True)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageBucketsDeleteRequest(_messages.Message):
r"""A StorageBucketsDeleteRequest object.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: If set, only deletes the bucket if its
metageneration matches this value.
ifMetagenerationNotMatch: If set, only deletes the bucket if its
metageneration does not match this value.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2)
ifMetagenerationNotMatch = _messages.IntegerField(3)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageBucketsDeleteResponse(_messages.Message):
r"""An empty StorageBucketsDelete response."""
class StorageBucketsGetIamPolicyRequest(_messages.Message):
r"""A StorageBucketsGetIamPolicyRequest object.
Fields:
bucket: Name of a bucket.
optionsRequestedPolicyVersion: The IAM policy format version to be
returned. If the optionsRequestedPolicyVersion is for an older version
that doesn't support part of the requested IAM policy, the request
fails.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
optionsRequestedPolicyVersion = _messages.IntegerField(2, variant=_messages.Variant.INT32)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketsGetRequest(_messages.Message):
r"""A StorageBucketsGetRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: Makes the return of the bucket metadata conditional
on whether the bucket's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the return of the bucket metadata
conditional on whether the bucket's current metageneration does not
match the given value.
projection: Set of properties to return. Defaults to noAcl.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2)
ifMetagenerationNotMatch = _messages.IntegerField(3)
projection = _messages.EnumField('ProjectionValueValuesEnum', 4)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
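
# Illustrative sketch: a bucket get request with a metageneration precondition
# and full projection. The bucket name and metageneration are placeholders.
def _example_buckets_get_request():
  return StorageBucketsGetRequest(
      bucket='example-bucket',
      ifMetagenerationMatch=3,
      projection=StorageBucketsGetRequest.ProjectionValueValuesEnum.full)
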
class StorageBucketsInsertRequest(_messages.Message):
r"""A StorageBucketsInsertRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this bucket.
PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
default object access controls to this bucket.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the bucket resource specifies acl or defaultObjectAcl properties,
when it defaults to full.
Fields:
bucket: A Bucket resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this bucket.
predefinedDefaultObjectAcl: Apply a predefined set of default object
access controls to this bucket.
project: A valid API project identifier.
projection: Set of properties to return. Defaults to noAcl, unless the
bucket resource specifies acl or defaultObjectAcl properties, when it
defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this bucket.
Values:
authenticatedRead: Project team owners get OWNER access, and
allAuthenticatedUsers get READER access.
private: Project team owners get OWNER access.
projectPrivate: Project team members get access according to their
roles.
publicRead: Project team owners get OWNER access, and allUsers get
READER access.
publicReadWrite: Project team owners get OWNER access, and allUsers get
WRITER access.
"""
authenticatedRead = 0
private = 1
projectPrivate = 2
publicRead = 3
publicReadWrite = 4
class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of default object access controls to this
bucket.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the bucket
resource specifies acl or defaultObjectAcl properties, when it defaults to
full.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.MessageField('Bucket', 1)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 2)
predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 3)
project = _messages.StringField(4, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
provisionalUserProject = _messages.StringField(6)
userProject = _messages.StringField(7)
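
# Illustrative sketch: a bucket insert request wrapping the Bucket resource to
# create. The project and bucket name are hypothetical placeholders.
def _example_buckets_insert_request():
  return StorageBucketsInsertRequest(
      project='example-project',
      bucket=Bucket(name='example-bucket', location='US'),
      predefinedAcl=(StorageBucketsInsertRequest
                     .PredefinedAclValueValuesEnum.projectPrivate))
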
class StorageBucketsListChannelsRequest(_messages.Message):
r"""A StorageBucketsListChannelsRequest object.
Fields:
bucket: Name of a bucket.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
provisionalUserProject = _messages.StringField(2)
userProject = _messages.StringField(3)
class StorageBucketsListRequest(_messages.Message):
r"""A StorageBucketsListRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
maxResults: Maximum number of buckets to return in a single response. The
service will use this parameter or 1,000 items, whichever is smaller.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
prefix: Filter results to buckets whose names begin with this prefix.
project: A valid API project identifier.
projection: Set of properties to return. Defaults to noAcl.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32, default=1000)
pageToken = _messages.StringField(2)
prefix = _messages.StringField(3)
project = _messages.StringField(4, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
provisionalUserProject = _messages.StringField(6)
userProject = _messages.StringField(7)
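
# Illustrative sketch of paging through buckets with pageToken/nextPageToken.
# `issue_list` stands in for whatever client call performs buckets.list; it is
# a hypothetical callable taking this request and returning a Buckets message.
def _example_list_all_buckets(issue_list):
  request = StorageBucketsListRequest(project='example-project', maxResults=100)
  buckets = []
  while True:
    response = issue_list(request)
    buckets.extend(response.items)
    if not response.nextPageToken:
      return buckets
    request.pageToken = response.nextPageToken
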
class StorageBucketsLockRetentionPolicyRequest(_messages.Message):
r"""A StorageBucketsLockRetentionPolicyRequest object.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: Makes the operation conditional on whether bucket's
current metageneration matches the given value.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketsPatchRequest(_messages.Message):
r"""A StorageBucketsPatchRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this bucket.
PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
default object access controls to this bucket.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of a bucket.
bucketResource: A Bucket resource to be passed as the request body.
ifMetagenerationMatch: Makes the return of the bucket metadata conditional
on whether the bucket's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the return of the bucket metadata
conditional on whether the bucket's current metageneration does not
match the given value.
predefinedAcl: Apply a predefined set of access controls to this bucket.
predefinedDefaultObjectAcl: Apply a predefined set of default object
access controls to this bucket.
projection: Set of properties to return. Defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this bucket.
Values:
authenticatedRead: Project team owners get OWNER access, and
allAuthenticatedUsers get READER access.
private: Project team owners get OWNER access.
projectPrivate: Project team members get access according to their
roles.
publicRead: Project team owners get OWNER access, and allUsers get
READER access.
publicReadWrite: Project team owners get OWNER access, and allUsers get
WRITER access.
"""
authenticatedRead = 0
private = 1
projectPrivate = 2
publicRead = 3
publicReadWrite = 4
class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of default object access controls to this
bucket.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
bucketResource = _messages.MessageField('Bucket', 2)
ifMetagenerationMatch = _messages.IntegerField(3)
ifMetagenerationNotMatch = _messages.IntegerField(4)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5)
predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
provisionalUserProject = _messages.StringField(8)
userProject = _messages.StringField(9)
class StorageBucketsSetIamPolicyRequest(_messages.Message):
r"""A StorageBucketsSetIamPolicyRequest object.
Fields:
bucket: Name of a bucket.
policy: A Policy resource to be passed as the request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
policy = _messages.MessageField('Policy', 2)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketsTestIamPermissionsRequest(_messages.Message):
r"""A StorageBucketsTestIamPermissionsRequest object.
Fields:
bucket: Name of a bucket.
permissions: Permissions to test.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
permissions = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
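
# Illustrative sketch: probing a single permission on a hypothetical bucket.
# Note that `permissions` is declared above as a single required string.
def _example_test_iam_permissions_request():
  return StorageBucketsTestIamPermissionsRequest(
      bucket='example-bucket',
      permissions='storage.buckets.get')
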
class StorageBucketsUpdateRequest(_messages.Message):
r"""A StorageBucketsUpdateRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this bucket.
PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
default object access controls to this bucket.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of a bucket.
bucketResource: A Bucket resource to be passed as the request body.
ifMetagenerationMatch: Makes the return of the bucket metadata conditional
on whether the bucket's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the return of the bucket metadata
conditional on whether the bucket's current metageneration does not
match the given value.
predefinedAcl: Apply a predefined set of access controls to this bucket.
predefinedDefaultObjectAcl: Apply a predefined set of default object
access controls to this bucket.
projection: Set of properties to return. Defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this bucket.
Values:
authenticatedRead: Project team owners get OWNER access, and
allAuthenticatedUsers get READER access.
private: Project team owners get OWNER access.
projectPrivate: Project team members get access according to their
roles.
publicRead: Project team owners get OWNER access, and allUsers get
READER access.
publicReadWrite: Project team owners get OWNER access, and allUsers get
WRITER access.
"""
authenticatedRead = 0
private = 1
projectPrivate = 2
publicRead = 3
publicReadWrite = 4
class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of default object access controls to this
bucket.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
bucketResource = _messages.MessageField('Bucket', 2)
ifMetagenerationMatch = _messages.IntegerField(3)
ifMetagenerationNotMatch = _messages.IntegerField(4)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5)
predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
provisionalUserProject = _messages.StringField(8)
userProject = _messages.StringField(9)
class StorageChannelsStopResponse(_messages.Message):
r"""An empty StorageChannelsStop response."""
class StorageDefaultObjectAccessControlsDeleteRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsDeleteRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageDefaultObjectAccessControlsDeleteResponse(_messages.Message):
r"""An empty StorageDefaultObjectAccessControlsDelete response."""
class StorageDefaultObjectAccessControlsGetRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsGetRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageDefaultObjectAccessControlsInsertRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsInsertRequest object.
Fields:
bucket: Name of a bucket.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 2)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageDefaultObjectAccessControlsListRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsListRequest object.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: If present, only return default ACL listing if the
bucket's current metageneration matches this value.
ifMetagenerationNotMatch: If present, only return default ACL listing if
the bucket's current metageneration does not match the given value.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2)
ifMetagenerationNotMatch = _messages.IntegerField(3)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageDefaultObjectAccessControlsPatchRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsPatchRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a Requester Pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 3)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageDefaultObjectAccessControlsUpdateRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsUpdateRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 3)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageNotificationsDeleteRequest(_messages.Message):
r"""A StorageNotificationsDeleteRequest object.
Fields:
bucket: The parent bucket of the notification.
notification: ID of the notification to delete.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
notification = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageNotificationsDeleteResponse(_messages.Message):
r"""An empty StorageNotificationsDelete response."""
class StorageNotificationsGetRequest(_messages.Message):
r"""A StorageNotificationsGetRequest object.
Fields:
bucket: The parent bucket of the notification.
notification: Notification ID
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
notification = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageNotificationsInsertRequest(_messages.Message):
r"""A StorageNotificationsInsertRequest object.
Fields:
bucket: The parent bucket of the notification.
notification: A Notification resource to be passed as the request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
notification = _messages.MessageField('Notification', 2)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageNotificationsListRequest(_messages.Message):
r"""A StorageNotificationsListRequest object.
Fields:
bucket: Name of a Google Cloud Storage bucket.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
provisionalUserProject = _messages.StringField(2)
userProject = _messages.StringField(3)
class StorageObjectAccessControlsDeleteRequest(_messages.Message):
r"""A StorageObjectAccessControlsDeleteRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageObjectAccessControlsDeleteResponse(_messages.Message):
r"""An empty StorageObjectAccessControlsDelete response."""
class StorageObjectAccessControlsGetRequest(_messages.Message):
r"""A StorageObjectAccessControlsGetRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageObjectAccessControlsInsertRequest(_messages.Message):
r"""A StorageObjectAccessControlsInsertRequest object.
Fields:
bucket: Name of a bucket.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
    objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 4)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageObjectAccessControlsListRequest(_messages.Message):
r"""A StorageObjectAccessControlsListRequest object.
Fields:
bucket: Name of a bucket.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageObjectAccessControlsPatchRequest(_messages.Message):
r"""A StorageObjectAccessControlsPatchRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
    objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 5)
provisionalUserProject = _messages.StringField(6)
userProject = _messages.StringField(7)
class StorageObjectAccessControlsUpdateRequest(_messages.Message):
r"""A StorageObjectAccessControlsUpdateRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
    objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 5)
provisionalUserProject = _messages.StringField(6)
userProject = _messages.StringField(7)
class StorageObjectsComposeRequest(_messages.Message):
r"""A StorageObjectsComposeRequest object.
Enums:
DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
controls to the destination object.
Fields:
composeRequest: A ComposeRequest resource to be passed as the request
body.
destinationBucket: Name of the bucket containing the source objects. The
destination object is stored in this bucket.
destinationObject: Name of the new object. For information about how to
URL encode object names to be path safe, see Encoding URI Path Parts.
destinationPredefinedAcl: Apply a predefined set of access controls to the
destination object.
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
kmsKeyName: Resource name of the Cloud KMS key, of the form projects/my-
project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be
used to encrypt the object. Overrides the object metadata's kms_key_name
value, if any.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to the destination object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
composeRequest = _messages.MessageField('ComposeRequest', 1)
destinationBucket = _messages.StringField(2, required=True)
destinationObject = _messages.StringField(3, required=True)
destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4)
ifGenerationMatch = _messages.IntegerField(5)
ifMetagenerationMatch = _messages.IntegerField(6)
kmsKeyName = _messages.StringField(7)
provisionalUserProject = _messages.StringField(8)
userProject = _messages.StringField(9)
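

# Illustrative sketch only, not part of the generated API surface: one way
# this request message might be populated. Bucket and object names are
# invented, and ComposeRequest (with its SourceObjectsValueListEntry nested
# class) is assumed to be the message class generated earlier in this module.
def _example_compose_request():
  return StorageObjectsComposeRequest(
      destinationBucket='example-bucket',
      destinationObject='combined.txt',
      composeRequest=ComposeRequest(
          sourceObjects=[
              ComposeRequest.SourceObjectsValueListEntry(name='part-1.txt'),
              ComposeRequest.SourceObjectsValueListEntry(name='part-2.txt'),
          ]))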
class StorageObjectsCopyRequest(_messages.Message):
r"""A StorageObjectsCopyRequest object.
Enums:
DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
controls to the destination object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the object resource specifies the acl property, when it defaults
to full.
Fields:
destinationBucket: Name of the bucket in which to store the new object.
Overrides the provided object metadata's bucket value, if any.For
information about how to URL encode object names to be path safe, see
Encoding URI Path Parts.
destinationKmsKeyName: Resource name of the Cloud KMS key, of the form
projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
that will be used to encrypt the object. Overrides the object metadata's
kms_key_name value, if any.
destinationObject: Name of the new object. Required when the object
metadata is not otherwise provided. Overrides the object metadata's name
value, if any.
destinationPredefinedAcl: Apply a predefined set of access controls to the
destination object.
ifGenerationMatch: Makes the operation conditional on whether the
destination object's current generation matches the given value. Setting
to 0 makes the operation succeed only if there are no live versions of
the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
destination object's current generation does not match the given value.
If no live object exists, the precondition fails. Setting to 0 makes the
operation succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
destination object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
destination object's current metageneration does not match the given
value.
ifSourceGenerationMatch: Makes the operation conditional on whether the
source object's current generation matches the given value.
ifSourceGenerationNotMatch: Makes the operation conditional on whether the
source object's current generation does not match the given value.
ifSourceMetagenerationMatch: Makes the operation conditional on whether
the source object's current metageneration matches the given value.
ifSourceMetagenerationNotMatch: Makes the operation conditional on whether
the source object's current metageneration does not match the given
value.
    object: An Object resource to be passed as the request body.
projection: Set of properties to return. Defaults to noAcl, unless the
object resource specifies the acl property, when it defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
sourceBucket: Name of the bucket in which to find the source object.
sourceGeneration: If present, selects a specific revision of the source
object (as opposed to the latest version, the default).
sourceObject: Name of the source object. For information about how to URL
encode object names to be path safe, see Encoding URI Path Parts.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to the destination object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the object
resource specifies the acl property, when it defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
destinationBucket = _messages.StringField(1, required=True)
destinationKmsKeyName = _messages.StringField(2)
destinationObject = _messages.StringField(3, required=True)
destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4)
ifGenerationMatch = _messages.IntegerField(5)
ifGenerationNotMatch = _messages.IntegerField(6)
ifMetagenerationMatch = _messages.IntegerField(7)
ifMetagenerationNotMatch = _messages.IntegerField(8)
ifSourceGenerationMatch = _messages.IntegerField(9)
ifSourceGenerationNotMatch = _messages.IntegerField(10)
ifSourceMetagenerationMatch = _messages.IntegerField(11)
ifSourceMetagenerationNotMatch = _messages.IntegerField(12)
object = _messages.MessageField('Object', 13)
projection = _messages.EnumField('ProjectionValueValuesEnum', 14)
provisionalUserProject = _messages.StringField(15)
sourceBucket = _messages.StringField(16, required=True)
sourceGeneration = _messages.IntegerField(17)
sourceObject = _messages.StringField(18, required=True)
userProject = _messages.StringField(19)
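

# Illustrative sketch only: a copy pinned to one source generation, with
# ifGenerationMatch=0 so it succeeds only if the destination object does not
# already exist (see the field docs above). All names are invented.
def _example_copy_request():
  return StorageObjectsCopyRequest(
      sourceBucket='src-bucket',
      sourceObject='reports/2020.csv',
      sourceGeneration=1234567890,
      destinationBucket='dst-bucket',
      destinationObject='backup/2020.csv',
      ifGenerationMatch=0)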
class StorageObjectsDeleteRequest(_messages.Message):
r"""A StorageObjectsDeleteRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, permanently deletes a specific revision of this
object (as opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
provisionalUserProject = _messages.StringField(8)
userProject = _messages.StringField(9)
class StorageObjectsDeleteResponse(_messages.Message):
r"""An empty StorageObjectsDelete response."""
class StorageObjectsGetIamPolicyRequest(_messages.Message):
r"""A StorageObjectsGetIamPolicyRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageObjectsGetRequest(_messages.Message):
r"""A StorageObjectsGetRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
projection: Set of properties to return. Defaults to noAcl.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 8)
provisionalUserProject = _messages.StringField(9)
userProject = _messages.StringField(10)
class StorageObjectsInsertRequest(_messages.Message):
r"""A StorageObjectsInsertRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the object resource specifies the acl property, when it defaults
to full.
Fields:
bucket: Name of the bucket in which to store the new object. Overrides the
provided object metadata's bucket value, if any.
contentEncoding: If set, sets the contentEncoding property of the final
object to this value. Setting this parameter is equivalent to setting
the contentEncoding metadata property. This can be useful when uploading
an object with uploadType=media to indicate the encoding of the content
being uploaded.
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
kmsKeyName: Resource name of the Cloud KMS key, of the form projects/my-
project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be
used to encrypt the object. Overrides the object metadata's kms_key_name
value, if any.
name: Name of the object. Required when the object metadata is not
otherwise provided. Overrides the object metadata's name value, if any.
For information about how to URL encode object names to be path safe,
see Encoding URI Path Parts.
    object: An Object resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this object.
projection: Set of properties to return. Defaults to noAcl, unless the
object resource specifies the acl property, when it defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the object
resource specifies the acl property, when it defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
contentEncoding = _messages.StringField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
kmsKeyName = _messages.StringField(7)
name = _messages.StringField(8)
object = _messages.MessageField('Object', 9)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 10)
projection = _messages.EnumField('ProjectionValueValuesEnum', 11)
provisionalUserProject = _messages.StringField(12)
userProject = _messages.StringField(13)
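

# Illustrative sketch only: setting ifGenerationMatch=0 turns an insert into
# "create only if absent", per the field docs above. Names are invented.
def _example_insert_request():
  acl = StorageObjectsInsertRequest.PredefinedAclValueValuesEnum.private
  return StorageObjectsInsertRequest(
      bucket='example-bucket',
      name='logs/app.log',
      ifGenerationMatch=0,
      predefinedAcl=acl)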
class StorageObjectsListRequest(_messages.Message):
r"""A StorageObjectsListRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of the bucket in which to look for objects.
delimiter: Returns results in a directory-like mode. items will contain
only objects whose names, aside from the prefix, do not contain
delimiter. Objects whose names, aside from the prefix, contain delimiter
will have their name, truncated after the delimiter, returned in
prefixes. Duplicate prefixes are omitted.
endOffset: Filter results to objects whose names are lexicographically
before endOffset. If startOffset is also set, the objects listed will
have names between startOffset (inclusive) and endOffset (exclusive).
includeTrailingDelimiter: If true, objects that end in exactly one
instance of delimiter will have their metadata included in items in
addition to prefixes.
maxResults: Maximum number of items plus prefixes to return in a single
page of responses. As duplicate prefixes are omitted, fewer total
results may be returned than requested. The service will use this
parameter or 1,000 items, whichever is smaller.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
prefix: Filter results to objects whose names begin with this prefix.
projection: Set of properties to return. Defaults to noAcl.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
startOffset: Filter results to objects whose names are lexicographically
equal to or after startOffset. If endOffset is also set, the objects
listed will have names between startOffset (inclusive) and endOffset
(exclusive).
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
versions: If true, lists all versions of an object as distinct results.
The default is false. For more information, see Object Versioning.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
delimiter = _messages.StringField(2)
endOffset = _messages.StringField(3)
includeTrailingDelimiter = _messages.BooleanField(4)
maxResults = _messages.IntegerField(5, variant=_messages.Variant.UINT32, default=1000)
pageToken = _messages.StringField(6)
prefix = _messages.StringField(7)
projection = _messages.EnumField('ProjectionValueValuesEnum', 8)
provisionalUserProject = _messages.StringField(9)
startOffset = _messages.StringField(10)
userProject = _messages.StringField(11)
versions = _messages.BooleanField(12)
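

# Illustrative sketch only: a "directory-style" listing one level below
# photos/2020/. With delimiter='/', deeper names come back in `prefixes`
# rather than `items`, as described above. Names are invented.
def _example_list_request():
  return StorageObjectsListRequest(
      bucket='example-bucket',
      prefix='photos/2020/',
      delimiter='/',
      maxResults=100)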
class StorageObjectsPatchRequest(_messages.Message):
r"""A StorageObjectsPatchRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
    objectResource: An Object resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this object.
projection: Set of properties to return. Defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
    userProject: The project to be billed for this request. Required for
      Requester Pays buckets.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
objectResource = _messages.MessageField('Object', 8)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
provisionalUserProject = _messages.StringField(11)
userProject = _messages.StringField(12)
class StorageObjectsRewriteRequest(_messages.Message):
r"""A StorageObjectsRewriteRequest object.
Enums:
DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
controls to the destination object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the object resource specifies the acl property, when it defaults
to full.
Fields:
destinationBucket: Name of the bucket in which to store the new object.
Overrides the provided object metadata's bucket value, if any.
destinationKmsKeyName: Resource name of the Cloud KMS key, of the form
projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
that will be used to encrypt the object. Overrides the object metadata's
kms_key_name value, if any.
destinationObject: Name of the new object. Required when the object
metadata is not otherwise provided. Overrides the object metadata's name
value, if any. For information about how to URL encode object names to
be path safe, see Encoding URI Path Parts.
destinationPredefinedAcl: Apply a predefined set of access controls to the
destination object.
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
destination object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
destination object's current metageneration does not match the given
value.
ifSourceGenerationMatch: Makes the operation conditional on whether the
source object's current generation matches the given value.
ifSourceGenerationNotMatch: Makes the operation conditional on whether the
source object's current generation does not match the given value.
ifSourceMetagenerationMatch: Makes the operation conditional on whether
the source object's current metageneration matches the given value.
ifSourceMetagenerationNotMatch: Makes the operation conditional on whether
the source object's current metageneration does not match the given
value.
maxBytesRewrittenPerCall: The maximum number of bytes that will be
rewritten per rewrite request. Most callers shouldn't need to specify
this parameter - it is primarily in place to support testing. If
specified the value must be an integral multiple of 1 MiB (1048576).
Also, this only applies to requests where the source and destination
span locations and/or storage classes. Finally, this value must not
      change across rewrite calls; otherwise you'll get an error that the
rewriteToken is invalid.
    object: An Object resource to be passed as the request body.
projection: Set of properties to return. Defaults to noAcl, unless the
object resource specifies the acl property, when it defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
rewriteToken: Include this field (from the previous rewrite response) on
each rewrite request after the first one, until the rewrite response
'done' flag is true. Calls that provide a rewriteToken can omit all
other request fields, but if included those fields must match the values
provided in the first rewrite request.
sourceBucket: Name of the bucket in which to find the source object.
sourceGeneration: If present, selects a specific revision of the source
object (as opposed to the latest version, the default).
sourceObject: Name of the source object. For information about how to URL
encode object names to be path safe, see Encoding URI Path Parts.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to the destination object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the object
resource specifies the acl property, when it defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
destinationBucket = _messages.StringField(1, required=True)
destinationKmsKeyName = _messages.StringField(2)
destinationObject = _messages.StringField(3, required=True)
destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4)
ifGenerationMatch = _messages.IntegerField(5)
ifGenerationNotMatch = _messages.IntegerField(6)
ifMetagenerationMatch = _messages.IntegerField(7)
ifMetagenerationNotMatch = _messages.IntegerField(8)
ifSourceGenerationMatch = _messages.IntegerField(9)
ifSourceGenerationNotMatch = _messages.IntegerField(10)
ifSourceMetagenerationMatch = _messages.IntegerField(11)
ifSourceMetagenerationNotMatch = _messages.IntegerField(12)
maxBytesRewrittenPerCall = _messages.IntegerField(13)
object = _messages.MessageField('Object', 14)
projection = _messages.EnumField('ProjectionValueValuesEnum', 15)
provisionalUserProject = _messages.StringField(16)
rewriteToken = _messages.StringField(17)
sourceBucket = _messages.StringField(18, required=True)
sourceGeneration = _messages.IntegerField(19)
sourceObject = _messages.StringField(20, required=True)
userProject = _messages.StringField(21)
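

# Illustrative sketch only of the rewriteToken loop described above. `client`
# stands for a hypothetical generated service client exposing an
# objects.Rewrite method; only the token-passing pattern matters here.
def _example_rewrite_loop(client, request):
  response = client.objects.Rewrite(request)
  while not response.done:
    # Echo the token from each response back until the service reports done.
    request.rewriteToken = response.rewriteToken
    response = client.objects.Rewrite(request)
  return response.resource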
class StorageObjectsSetIamPolicyRequest(_messages.Message):
r"""A StorageObjectsSetIamPolicyRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
policy: A Policy resource to be passed as the request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
policy = _messages.MessageField('Policy', 4)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageObjectsTestIamPermissionsRequest(_messages.Message):
r"""A StorageObjectsTestIamPermissionsRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
permissions: Permissions to test.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
permissions = _messages.StringField(4, required=True)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageObjectsUpdateRequest(_messages.Message):
r"""A StorageObjectsUpdateRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
    objectResource: An Object resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this object.
projection: Set of properties to return. Defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
objectResource = _messages.MessageField('Object', 8)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
provisionalUserProject = _messages.StringField(11)
userProject = _messages.StringField(12)
class StorageObjectsWatchAllRequest(_messages.Message):
r"""A StorageObjectsWatchAllRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of the bucket in which to look for objects.
channel: A Channel resource to be passed as the request body.
delimiter: Returns results in a directory-like mode. items will contain
only objects whose names, aside from the prefix, do not contain
delimiter. Objects whose names, aside from the prefix, contain delimiter
will have their name, truncated after the delimiter, returned in
prefixes. Duplicate prefixes are omitted.
endOffset: Filter results to objects whose names are lexicographically
before endOffset. If startOffset is also set, the objects listed will
have names between startOffset (inclusive) and endOffset (exclusive).
includeTrailingDelimiter: If true, objects that end in exactly one
instance of delimiter will have their metadata included in items in
addition to prefixes.
maxResults: Maximum number of items plus prefixes to return in a single
page of responses. As duplicate prefixes are omitted, fewer total
results may be returned than requested. The service will use this
parameter or 1,000 items, whichever is smaller.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
prefix: Filter results to objects whose names begin with this prefix.
projection: Set of properties to return. Defaults to noAcl.
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
startOffset: Filter results to objects whose names are lexicographically
equal to or after startOffset. If endOffset is also set, the objects
listed will have names between startOffset (inclusive) and endOffset
(exclusive).
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
versions: If true, lists all versions of an object as distinct results.
The default is false. For more information, see Object Versioning.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
channel = _messages.MessageField('Channel', 2)
delimiter = _messages.StringField(3)
endOffset = _messages.StringField(4)
includeTrailingDelimiter = _messages.BooleanField(5)
maxResults = _messages.IntegerField(6, variant=_messages.Variant.UINT32, default=1000)
pageToken = _messages.StringField(7)
prefix = _messages.StringField(8)
projection = _messages.EnumField('ProjectionValueValuesEnum', 9)
provisionalUserProject = _messages.StringField(10)
startOffset = _messages.StringField(11)
userProject = _messages.StringField(12)
versions = _messages.BooleanField(13)
class StorageProjectsHmacKeysCreateRequest(_messages.Message):
r"""A StorageProjectsHmacKeysCreateRequest object.
Fields:
projectId: Project ID owning the service account.
serviceAccountEmail: Email address of the service account.
userProject: The project to be billed for this request.
"""
projectId = _messages.StringField(1, required=True)
serviceAccountEmail = _messages.StringField(2, required=True)
userProject = _messages.StringField(3)
class StorageProjectsHmacKeysDeleteRequest(_messages.Message):
r"""A StorageProjectsHmacKeysDeleteRequest object.
Fields:
accessId: Name of the HMAC key to be deleted.
projectId: Project ID owning the requested key
userProject: The project to be billed for this request.
"""
accessId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
userProject = _messages.StringField(3)
class StorageProjectsHmacKeysDeleteResponse(_messages.Message):
r"""An empty StorageProjectsHmacKeysDelete response."""
class StorageProjectsHmacKeysGetRequest(_messages.Message):
r"""A StorageProjectsHmacKeysGetRequest object.
Fields:
accessId: Name of the HMAC key.
projectId: Project ID owning the service account of the requested key.
userProject: The project to be billed for this request.
"""
accessId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
userProject = _messages.StringField(3)
class StorageProjectsHmacKeysListRequest(_messages.Message):
r"""A StorageProjectsHmacKeysListRequest object.
Fields:
maxResults: Maximum number of items to return in a single page of
responses. The service uses this parameter or 250 items, whichever is
smaller. The max number of items per page will also be limited by the
number of distinct service accounts in the response. If the number of
      service accounts in a single response is too high, the page will be
      truncated and a next page token will be returned.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
projectId: Name of the project in which to look for HMAC keys.
serviceAccountEmail: If present, only keys for the given service account
are returned.
showDeletedKeys: Whether or not to show keys in the DELETED state.
userProject: The project to be billed for this request.
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32, default=250)
pageToken = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
serviceAccountEmail = _messages.StringField(4)
showDeletedKeys = _messages.BooleanField(5)
userProject = _messages.StringField(6)
class StorageProjectsHmacKeysUpdateRequest(_messages.Message):
r"""A StorageProjectsHmacKeysUpdateRequest object.
Fields:
accessId: Name of the HMAC key being updated.
hmacKeyMetadata: A HmacKeyMetadata resource to be passed as the request
body.
projectId: Project ID owning the service account of the updated key.
userProject: The project to be billed for this request.
"""
accessId = _messages.StringField(1, required=True)
hmacKeyMetadata = _messages.MessageField('HmacKeyMetadata', 2)
projectId = _messages.StringField(3, required=True)
userProject = _messages.StringField(4)
class StorageProjectsServiceAccountGetRequest(_messages.Message):
r"""A StorageProjectsServiceAccountGetRequest object.
Fields:
projectId: Project ID
provisionalUserProject: The project to be billed for this request if the
target bucket is requester-pays bucket.
userProject: The project to be billed for this request.
"""
projectId = _messages.StringField(1, required=True)
provisionalUserProject = _messages.StringField(2)
userProject = _messages.StringField(3)
class TestIamPermissionsResponse(_messages.Message):
r"""A storage.(buckets|objects).testIamPermissions response.
Fields:
kind: The kind of item this is.
    permissions: The permissions held by the caller. Permissions are always of
      the format storage.resource.capability, where resource is one of buckets
      or objects. The supported permissions are as follows:
      - storage.buckets.delete - Delete bucket.
      - storage.buckets.get - Read bucket metadata.
      - storage.buckets.getIamPolicy - Read bucket IAM policy.
      - storage.buckets.create - Create bucket.
      - storage.buckets.list - List buckets.
      - storage.buckets.setIamPolicy - Update bucket IAM policy.
      - storage.buckets.update - Update bucket metadata.
      - storage.objects.delete - Delete object.
      - storage.objects.get - Read object data and metadata.
      - storage.objects.getIamPolicy - Read object IAM policy.
      - storage.objects.create - Create object.
      - storage.objects.list - List objects.
      - storage.objects.setIamPolicy - Update object IAM policy.
      - storage.objects.update - Update object metadata.
"""
kind = _messages.StringField(1, default='storage#testIamPermissionsResponse')
permissions = _messages.StringField(2, repeated=True)
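

# Illustrative sketch only: asking whether the caller may read an object.
# `client` is a hypothetical generated service client; the permission string
# follows the storage.<resource>.<capability> format documented above.
def _example_can_read_object(client):
  request = StorageObjectsTestIamPermissionsRequest(
      bucket='example-bucket',
      object='data.csv',
      permissions='storage.objects.get')
  response = client.objects.TestIamPermissions(request)
  return 'storage.objects.get' in response.permissions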
| [
"[email protected]"
] | |
05e8534070a93a7ccf28f2c4d005262247ce17ec | 55fe565e56894d04f6f95b5f228e5be6576fd039 | /youtube/models.py | fe1b1e340a1b955cb6101d6e190f4d9b6c56539f | [] | no_license | anandanimesh98/YouTube-Clone | 4d0fce6d279bada7cef83fa473f824310a108f41 | 4a3825d7241a710d6f6ee91153f0e3a81d973810 | refs/heads/master | 2023-03-21T03:30:13.609936 | 2020-06-18T16:28:42 | 2020-06-18T16:28:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | from django.conf import settings
from django.db import models
from django.utils import timezone
# Create your models here.
class Video(models.Model):
title = models.CharField(max_length=30)
description = models.TextField(max_length=300)
path = models.CharField(max_length=60)
    datetime = models.DateTimeField(blank=False, null=False)  # TODO: auto_now=True
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
number_of_views = models.IntegerField(blank=True, default=0)
class Comment(models.Model):
text = models.TextField(max_length=300)
datetime = models.DateTimeField(auto_now=True, blank=False, null=False)
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
video = models.ForeignKey(Video, on_delete=models.CASCADE)
class Channel(models.Model):
channel_name = models.CharField(max_length=50, blank=False, null=False)
subscribers = models.IntegerField(default=0, blank=False, null=False)
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
class Like(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
video = models.ForeignKey(Video, on_delete=models.CASCADE)
class Dislike(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
video = models.ForeignKey(Video, on_delete=models.CASCADE)
class Video_View(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
video = models.ForeignKey(Video, on_delete=models.CASCADE)
datetime = models.DateTimeField(default=timezone.now)
class Channel_Subscription(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
channel = models.ForeignKey(Channel, on_delete=models.CASCADE)
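

# Illustrative usage sketch, not part of the original app: get_or_create on a
# (user, video) pair gives Like toggle semantics; a second call for the same
# pair removes the row.
def _example_toggle_like(user, video):
    like, created = Like.objects.get_or_create(user=user, video=video)
    if not created:
        like.delete()
    return created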
| [
"[email protected]"
] | |
27ced895bc04e2692924f40836080ed478a9d412 | a2cde76898f72048f5ddfe2088147efaa83fdef8 | /users/api/serializers.py | 97c39fc3d7f421d49cfc63232341ecd04436ae1c | [] | no_license | dlin99/Django-Blog | f29b91c0c6031e3a5212dc091ddd22b8aab47a3a | 2d45b1f3d6b2a8ec1333b381abfe6e37e512efa1 | refs/heads/main | 2023-02-04T21:16:53.503476 | 2020-12-27T04:32:09 | 2020-12-27T04:32:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | from rest_framework.serializers import (
ModelSerializer,
HyperlinkedIdentityField,
SerializerMethodField,
ValidationError,
EmailField,
CharField,
)
from django.db.models import Q
# from django.contrib.contenttypes.models import ContentType
# from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
User = get_user_model()
from rest_framework_jwt.settings import api_settings
class UserDetailSerializer(ModelSerializer):
class Meta:
model = User
fields = [
'username',
'email',
'first_name',
'last_name',
]
class UserCreateSerializer(ModelSerializer):
email = EmailField(label="Email Address")
email2 = EmailField(label="Confirm Email")
class Meta:
model = User
fields = [
'username',
'email',
'email2',
'password',
]
extra_kwargs = {"password":
{"write_only": True}
}
def validate(self, data):
# email = data['email']
# user_qs = User.objects.filter(email=email)
# if user_qs.exists():
# raise ValidationError("This user has already registered.")
return data
    def validate_email(self, value):
        # Cross-field check: read the raw "email2" confirmation value from the
        # initial payload, since sibling fields may not be validated yet.
        confirmation = self.get_initial().get("email2")
        if value != confirmation:
            raise ValidationError("Emails must match!")
        if User.objects.filter(email=value).exists():
            raise ValidationError("This email has already been registered.")
        return value
    def validate_email2(self, value):
        # Mirror of validate_email so a mismatch is also reported on the
        # confirmation field itself.
        if self.get_initial().get("email") != value:
            raise ValidationError("Emails must match!")
        return value
def create(self, validated_data):
username = validated_data['username']
email = validated_data['email']
password = validated_data['password']
user_obj = User(
username=username,
email=email
)
user_obj.set_password(password)
user_obj.save()
return validated_data
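

# Illustrative sketch only: driving the serializer the way a registration view
# might; the payload keys mirror Meta.fields above and the values are made up.
def _example_register():
    serializer = UserCreateSerializer(data={
        'username': 'alice',
        'email': '[email protected]',
        'email2': '[email protected]',
        'password': 's3cret-pass',
    })
    serializer.is_valid(raise_exception=True)
    return serializer.save()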
class UserLoginSerializer(ModelSerializer):
token = CharField(allow_blank=True, read_only=True)
username = CharField(required=False, allow_blank=True)
email = EmailField(label="Email Address", required=False, allow_blank=True)
class Meta:
model = User
fields = [
'username',
'email',
'password',
'token',
]
extra_kwargs = {"password":
{"write_only": True}
}
def validate(self, data):
user_obj = None
email = data.get("email", None)
username = data.get("username", None)
password = data["password"] # we must have password in our data
if not email and not username:
raise ValidationError("A username or email is required to login.")
user = User.objects.filter(
Q(email=email) |
Q(username=username)
).distinct()
user = user.exclude(email__isnull=True).exclude(email__iexact="")
if user.exists() and user.count() == 1:
user_obj = user.first()
else:
raise ValidationError("This username/email is not valid.")
if user_obj:
if not user_obj.check_password(password):
raise ValidationError("Incorrect credentials please try again.")
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
data["token"] = token
return data
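# Note: on a successful login the JWT ends up in validated_data['token']; the
# payload/encode handlers above are resolved from rest_framework_jwt's api_settings.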
| [
"[email protected]"
] | |
b1e81953ecec8bd5d6eaf8d990376717136ca91d | 26c613f84d3e245418014ec97e482d1fef4dcf89 | /Named-Entity-Classifier/a4.py | a2c252521ae449a65fe7cbc7eb5093953c1f76df | [] | no_license | shorabhd/Natural-Language-Processing | 08675b83ecdc448539acf34a5fd12460301600ad | 20492d9bce046253084452f64e8de246aee8516a | refs/heads/master | 2020-12-30T16:45:29.618937 | 2017-05-11T20:27:46 | 2017-05-11T20:27:46 | 91,019,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,880 | py |
# coding: utf-8
# In[3]:
import nltk
from gensim.models import word2vec
from collections import Counter
from itertools import product
import numpy as np
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
import urllib.request
import sys
# In[4]:
def download_data():
""" Download labeled data.
DONE ALREADY.
"""
url = 'https://www.dropbox.com/s/bqitsnhk911ndqs/train.txt?dl=1'
urllib.request.urlretrieve(url, 'train.txt')
url = 'https://www.dropbox.com/s/s4gdb9fjex2afxs/test.txt?dl=1'
urllib.request.urlretrieve(url, 'test.txt')
# In[5]:
def read_data(filename):
"""
Read the data file into a list of lists of tuples.
Each sentence is a list of tuples.
Each tuple contains four entries:
- the token
- the part of speech
- the phrase chunking tag
- the named entity tag
For example, the first two entries in the
returned result for 'train.txt' are:
> train_data = read_data('train.txt')
> train_data[:2]
[[('EU', 'NNP', 'I-NP', 'I-ORG'),
('rejects', 'VBZ', 'I-VP', 'O'),
('German', 'JJ', 'I-NP', 'I-MISC'),
('call', 'NN', 'I-NP', 'O'),
('to', 'TO', 'I-VP', 'O'),
('boycott', 'VB', 'I-VP', 'O'),
('British', 'JJ', 'I-NP', 'I-MISC'),
('lamb', 'NN', 'I-NP', 'O'),
('.', '.', 'O', 'O')],
[('Peter', 'NNP', 'I-NP', 'I-PER'), ('Blackburn', 'NNP', 'I-NP', 'I-PER')]]
"""
with open(filename) as f:
lines = f.readlines()
result = []
temp = []
for l,line in enumerate(lines):
        if line.strip(): # skip blank lines; identity comparison against a string literal is fragile
if '-DOCSTART-' not in line:
temp.append(tuple([token.strip() for token in line.split()]))
else:
result.append(temp)
temp = []
result.append(temp)
return result
pass
# In[6]:
def make_feature_dicts(data,w2v_model,token=True,caps=True,pos=True,
chunk=True,context=True,w2v=True):
"""
Create feature dictionaries, one per token. Each entry in the dict consists of a key (a string)
and a value of 1.
Also returns a numpy array of NER tags (strings), one per token.
See a3_test.
The parameter flags determine which features to compute.
Params:
data.......the data returned by read_data
token......If True, create a feature with key 'tok=X', where X is the *lower case* string for this token.
caps.......If True, create a feature 'is_caps' that is 1 if this token begins with a capital letter.
If the token does not begin with a capital letter, do not add the feature.
pos........If True, add a feature 'pos=X', where X is the part of speech tag for this token.
chunk......If True, add a feature 'chunk=X', where X is the chunk tag for this token
context....If True, add features that combine all the features for the previous and subsequent token.
E.g., if the prior token has features 'is_caps' and 'tok=a', then the features for the
current token will be augmented with 'prev_is_caps' and 'prev_tok=a'.
Similarly, if the subsequent token has features 'is_caps', then the features for the
current token will also include 'next_is_caps'.
Returns:
- A list of dicts, one per token, containing the features for that token.
- A numpy array, one per token, containing the NER tag for that token.
"""
dicts = []
labels = []
for list in data:
for t,tuple in enumerate(list):
d = {}
if(token):
d['tok='+tuple[0].lower()] = 1
if(caps):
if tuple[0][0].isupper():
d['is_caps'] = 1
if (pos):
d['pos='+tuple[1]] = 1
if (chunk):
d['chunk='+tuple[2]] = 1
if (context):
if (t > 0):
if (token):
d['prev_tok='+list[t-1][0].lower()] = 1
if (caps):
if list[t - 1][0][0].isupper():
d['prev_is_caps'] = 1
if (pos):
d['prev_pos=' + list[t - 1][1]] = 1
if (chunk):
d['prev_chunk=' + list[t - 1][2]] = 1
if (w2v):
if list[t - 1][0] in w2v_model.wv.vocab.keys():
w2v_arr = w2v_model.wv[list[t - 1][0]]
for i in range(0,50):
d['prev_w2v_' + str(i)] = w2v_arr[i]
if(t < len(list)-1):
if (token):
d['next_tok=' + list[t + 1][0].lower()] = 1
if (caps):
if list[t + 1][0][0].isupper():
d['next_is_caps'] = 1
if (pos):
d['next_pos=' + list[t + 1][1]] = 1
if (chunk):
d['next_chunk=' + list[t + 1][2]] = 1
if (w2v):
if list[t + 1][0] in w2v_model.wv.vocab.keys():
w2v_arr = w2v_model.wv[list[t + 1][0]]
for i in range(0,50):
d['next_w2v_' + str(i)] = w2v_arr[i]
if (w2v):
if tuple[0] in w2v_model.wv.vocab.keys():
w2v_arr = w2v_model.wv[tuple[0]]
for i in range(0,50):
d['w2v_' + str(i)] = w2v_arr[i]
dicts.append(d)
labels.append(tuple[3])
#print()
return dicts, np.asarray(labels)
pass
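# Illustrative feature dict for the first token of the sample sentence in
# read_data's docstring (hypothetical output, w2v features omitted for brevity):
# {'tok=eu': 1, 'is_caps': 1, 'pos=NNP': 1, 'chunk=I-NP': 1,
#  'next_tok=rejects': 1, 'next_pos=VBZ': 1, 'next_chunk=I-VP': 1}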
# In[7]:
def confusion(true_labels, pred_labels):
"""
Create a confusion matrix, where cell (i,j)
is the number of tokens with true label i and predicted label j.
Params:
true_labels....numpy array of true NER labels, one per token
pred_labels....numpy array of predicted NER labels, one per token
Returns:
A Pandas DataFrame (http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html)
See Log.txt for an example.
"""
columns = set(true_labels)
X = np.zeros((len(columns),len(columns)))
for t,i in enumerate(sorted(columns)):
for p,j in enumerate(sorted(columns)):
for a, b in zip(true_labels, pred_labels):
if a == i and b == j:
X[t][p] += 1
X = X.astype(np.int32)
return pd.DataFrame(X,columns=sorted(columns),index=sorted(columns))
#return pd.DataFrame(pd.crosstab(true_labels,pred_labels),columns=sorted(columns),index=sorted(columns))
pass
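# Sanity check with toy labels (hypothetical, not assignment data):
# confusion(np.array(['O', 'I-PER', 'O']), np.array(['O', 'O', 'O']))
# yields a 2x2 frame whose 'I-PER' row records one token mispredicted as 'O'.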
# In[8]:
def evaluate(confusion_matrix):
"""
Compute precision, recall, f1 for each NER label.
The table should be sorted in ascending order of label name.
If the denominator needed for any computation is 0,
use 0 as the result. (E.g., replace NaNs with 0s).
NOTE: you should implement this on your own, not using
any external libraries (other than Pandas for creating
the output.)
Params:
confusion_matrix...output of confusion function above.
Returns:
A Pandas DataFrame. See Log.txt for an example.
"""
rows = ['precision','recall','f1']
X = confusion_matrix.as_matrix()
columns = confusion_matrix.keys()
evaluation_matrix = np.zeros((len(rows), len(columns)))
for i,row in enumerate(sorted(columns)):
evaluation_matrix[0][i] = X[i][i] / np.sum(X[:,i])
evaluation_matrix[1][i] = X[i][i] / np.sum(X[i,:])
if (evaluation_matrix[0][i] + evaluation_matrix[1][i]) > 0:
evaluation_matrix[2][i] = (2 * evaluation_matrix[0][i] * evaluation_matrix[1][i]) / (evaluation_matrix[0][i] + evaluation_matrix[1][i])
else:
evaluation_matrix[2][i] = 0
return pd.DataFrame(evaluation_matrix,columns=sorted(columns),index=rows)
pass
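# Worked micro-example (assumed counts): with 8 true positives, 2 false positives
# and 4 false negatives, precision = 8/10 = 0.8, recall = 8/12 ~ 0.667 and
# f1 = 2*0.8*0.667/(0.8+0.667) ~ 0.727.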
# In[9]:
def average_f1s(evaluation_matrix):
"""
Returns:
The average F1 score for all NER tags,
EXCLUDING the O tag.
"""
return np.average(evaluation_matrix.as_matrix()[2][:-1])
pass
# In[10]:
def evaluate_combinations(train_data, test_data, w2v_model):
"""
Run 16 different settings of the classifier,
corresponding to the 16 different assignments to the
parameters to make_feature_dicts:
caps, pos, chunk, context
That is, for one setting, we'll use
token=True, caps=False, pos=False, chunk=False, context=False
and for the next setting we'll use
token=True, caps=False, pos=False, chunk=False, context=True
For each setting, create the feature vectors for the training
and testing set, fit a LogisticRegression classifier, and compute
the average f1 (using the above functions).
Returns:
A Pandas DataFrame containing the F1 score for each setting,
along with the total number of parameters in the resulting
classifier. This should be sorted in descending order of F1.
(See Log.txt).
Note1: You may find itertools.product helpful for iterating over
combinations.
Note2: You may find it helpful to read the main method to see
how to run the full analysis pipeline.
"""
columns = ['f1','n_params', 'caps', 'pos', 'chunk', 'context','w2v']
    flags = [True, False]  # avoid shadowing the builtin name `bool`
    comb = product(flags, repeat=5)
result = []
for c in sorted(comb):
temp = []
dicts, labels = make_feature_dicts(train_data,w2v_model, caps=c[0], pos=c[1],
chunk=c[2], context=c[3],w2v=c[4])
vec = DictVectorizer()
X = vec.fit_transform(dicts)
clf = LogisticRegression()
clf.fit(X, labels)
test_dicts, test_labels = make_feature_dicts(test_data,w2v_model, caps=c[0], pos=c[1],
chunk=c[2], context=c[3],w2v=c[4])
X_test = vec.transform(test_dicts)
preds = clf.predict(X_test)
n_params = np.multiply(clf.coef_.shape[0],clf.coef_.shape[1])
temp.append(average_f1s(evaluate(confusion(test_labels, preds))))
temp.insert(1, n_params)
temp.extend(c)
result.append(temp)
#return pd.DataFrame(sorted(result,key=lambda x:-x[0]), columns=columns, index=index)
return pd.DataFrame(result, index=range(0, 32), columns=columns).sort_values(by='f1', axis=0, ascending=False)
pass
# In[11]:
def main():
"""
This method is done for you.
See Log.txt for expected output.
"""
download_data()
brown = nltk.corpus.brown
w2v_model = word2vec.Word2Vec(brown.sents(), size=50, window=5, min_count=5)
w2v_model.save("w2v_model")
w2v_model = word2vec.Word2Vec.load("w2v_model")
train_data = read_data('train.txt')
dicts, labels = make_feature_dicts(train_data,
w2v_model,
token=True,
caps=True,
pos=True,
chunk=True,
context=True,
w2v=True)
vec = DictVectorizer()
X = vec.fit_transform(dicts)
print('training data shape: %s\n' % str(X.shape))
clf = LogisticRegression()
clf.fit(X, labels)
test_data = read_data('test.txt')
test_dicts, test_labels = make_feature_dicts(test_data,
w2v_model,
token=True,
caps=True,
pos=True,
chunk=True,
context=True,
w2v=True)
X_test = vec.transform(test_dicts)
print('testing data shape: %s\n' % str(X_test.shape))
preds = clf.predict(X_test)
confusion_matrix = confusion(test_labels, preds)
print('confusion matrix:\n%s\n' % str(confusion_matrix))
evaluation_matrix = evaluate(confusion_matrix)
print('evaluation matrix:\n%s\n' % str(evaluation_matrix))
print('average f1s: %f\n' % average_f1s(evaluation_matrix))
combo_results = evaluate_combinations(train_data, test_data, w2v_model)
print('combination results:\n%s' % str(combo_results))
f = open('output1.txt', 'w')
f.write('combination results:\n%s' % str(combo_results))
f.close()
# In[12]:
if __name__ == '__main__':
#orig_stdout = sys.stdout
#f = open('output1.txt', 'w')
#sys.stdout = f
main()
#sys.stdout = orig_stdout
#f.close()
# In[ ]:
| [
"[email protected]"
] | |
79d371c1b3c959cd4a3518ccc51afaf080344bfe | 04293bd329a143a638bb0fc1291ada6f0192be24 | /save_reload.py | 2bab4a30f8aa8b3a556d40c00ec4886fe8fafbf4 | [] | no_license | qq1065507891/pytorch | 1b3b2e882652d3279e180dd754b7af5b414a4956 | 41f211313a2e38aae3a375d5abb032173967ad9f | refs/heads/master | 2020-04-07T16:23:22.051506 | 2018-11-21T12:11:15 | 2018-11-21T12:11:15 | 158,527,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | import torch
import matplotlib.pyplot as plt
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2*torch.rand(x.size())
def save():
net1 = torch.nn.Sequential(
torch.nn.Linear(1, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
loss_func = torch.nn.MSELoss()
for i in range(1000):
prediction = net1(x)
loss = loss_func(prediction, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
plt.figure(1, figsize=(10, 3))
plt.subplot(131)
plt.title('NET1')
plt.scatter(x.data.numpy(), y.data.numpy())
plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
torch.save(net1, 'net.pkl')
torch.save(net1.state_dict(), 'net_params.pkl')
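    # two save styles: the whole module object vs. its state_dict only; the
    # state_dict route requires rebuilding the architecture before calling
    # load_state_dict, as restore_params() below does.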
def restore_net():
net2 = torch.load('net.pkl')
prediction = net2(x)
plt.subplot(132)
plt.title('Net2')
plt.scatter(x.data.numpy(), y.data.numpy())
plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
def restore_params():
net3 = torch.nn.Sequential(
torch.nn.Linear(1, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
net3.load_state_dict(torch.load('net_params.pkl'))
prediction = net3(x)
# plot result
plt.subplot(133)
plt.title('Net3')
plt.scatter(x.data.numpy(), y.data.numpy())
plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
plt.show()
# save net1
save()
# restore entire net (may slow)
restore_net()
# restore only the net parameters
restore_params()
| [
"[email protected]"
] | |
cf2775cb365ae36dbb028cd97bbc7706b62a071c | d24f09c896f75a64abf04fb0b6b2c1702a35fce7 | /P11_Grid_Search.py | 0f11baa740381618a9055f380619318673d3f814 | [] | no_license | sarvesh10491/Python-Hackerrank-Problems | 16a7b566958dbd429b12d5b83346dd9c87eebab2 | f797307a44b76612a6f11d71e335469a5acab7f3 | refs/heads/master | 2020-03-19T07:53:00.054963 | 2018-06-16T06:01:06 | 2018-06-16T06:01:06 | 136,156,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | # https://www.hackerrank.com/challenges/the-grid-search/problem
import math
import os
import random
import re
import sys
# Function to find all possible starting indices of pattern substring in main string
def found_at(parentstr, substr):
indices=[]
i = parentstr.find(substr)
while i >= 0:
indices.append(i)
i = parentstr.find(substr, i+1)
return indices
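# Quick check of the helper above (hedged example, not from the test cases):
# found_at("aaaa", "aa") returns [0, 1, 2], because the search resumes at i+1
# and therefore also reports overlapping occurrences.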
def gridSearch(G, P):
gi=0
pi=0
# Check grid only till we can ensure pattern can fit to be found & not search entire grid to last line
while gi<(len(G)-len(P)+1):
idx=G[gi].find(P[pi])
if idx!=-1: # 1st line of pattern found in one of the line
            # Find the indices of all matching patterns in the line
            # ps = [pat.start() for pat in re.finditer(re.escape(P[pi]), G[gi])] <= this didn't work, since it skips the current match when resuming the search (misses overlaps)
ps = found_at(G[gi],P[pi])
# print("Found in line",gi,"at",ps)
for k in ps: # For each index as starting point
idx=k
tgi=gi
tpi=0
while tpi<len(P): # Check all subsequent lines in grid to see if respective subsequent lines in pattern also exist in them
tidx=G[tgi+tpi].find(P[tpi],idx)
if tidx!=idx:
break
else:
tpi+=1
if tpi==len(P):
return ("YES")
gi+=1
else:
gi+=1
return ("NO")
# Input
if __name__ == '__main__':
t = int(input())
for t_itr in range(t):
RC = input().split()
R = int(RC[0])
C = int(RC[1])
G = []
for _ in range(R):
G_item = input()
G.append(G_item)
rc = input().split()
r = int(rc[0])
c = int(rc[1])
P = []
for _ in range(r):
P_item = input()
P.append(P_item)
result = gridSearch(G, P)
print(result)
| [
"[email protected]"
] | |
9a6666ffe7fd9c01862329091ec04e6fb5b1e21a | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/rna-transcription/a033dd3f296e4281ad7fae26e02d0a4d.py | 612ab04228445c3976c52b87575abb7b15c6a2a3 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 262 | py | DNA_TO_RNA = {
'G' :'C',
'C' : 'G',
'T' : 'A',
'A' : 'U',
}
def to_rna(dna):
rna = ''
for c in dna:
if c not in DNA_TO_RNA:
raise ValueError("illegal nucleotide '%s' in dna" % c)
rna = rna + DNA_TO_RNA[c]
return rna
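# e.g. to_rna('GCTA') -> 'CGAU'; an unmapped letter such as 'X' raises ValueError.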
| [
"[email protected]"
] | |
3f87615992dc362c32de8d13fccaa5a95fa35eb0 | cb18fc0115cd43ed0a6b829e14cfff9dffbf30ce | /vec_env_es.py | e7cb7588f470b76364ca1b48fee51e8f306b7dff | [] | no_license | Guillaume-Cr/evolution_strategies | 4f9c67b1fe2d7c3326ec3763e80398629186a5fd | 10eb7a462b5b34eb4b86745a4a383215a0edefe9 | refs/heads/master | 2022-12-09T16:28:45.837791 | 2020-09-13T03:54:18 | 2020-09-13T03:54:18 | 277,405,046 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,501 | py | import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
from collections import namedtuple, deque
import random
import time
import multiprocessing as mp
import threading
from gym.vector.tests.utils import make_env, make_slow_env
from gym.vector.async_vector_env import AsyncVectorEnv
import concurrent.futures
from agent import Agent
from agent_test import AgentTest
print("Cores", mp.cpu_count())
#Number of agents working in parallel
num_agents = 50
env_fns = [make_env('CartPole-v0', num_agents) for _ in range(num_agents)]
env = AsyncVectorEnv(env_fns)
agent = Agent(env, state_size=4, action_size=2, num_agents=num_agents)
env_test = gym.make('CartPole-v0')
agent_test = AgentTest(env_test, state_size=4, action_size=2)
def sample_weights(current_weight, seed, rng, std):
    rng = np.random.RandomState(seed)  # rebuilt from the seed so each worker is reproducible
    weights = current_weight + (std*rng.randn(agent.get_weights_dim()))
    return {seed : weights}
def update_weights(weights, indices, alpha, std):
reconstructed_weights = np.zeros((len(indices), agent.get_weights_dim()))
for i in indices:
rng = np.random.RandomState(i)
reconstructed_weights[i] = weights + std*rng.randn(agent.get_weights_dim())
rewards = agent.evaluate(reconstructed_weights, num_agents)
scaled_rewards = np.zeros(len(rewards))
for i in indices:
scaled_rewards[i] = rewards[i]
scaled_rewards = (scaled_rewards - np.mean(scaled_rewards)) / (np.std(scaled_rewards) + 0.1)
n = len(rewards)
deltas = alpha / (n * std) * np.dot(reconstructed_weights.T, scaled_rewards)
return weights + deltas
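# The step above is the canonical evolution-strategies estimator,
#   w <- w + alpha/(n*std) * sum_i R_i * eps_i,
# where eps_i is the Gaussian noise reconstructed from seed i and R_i the
# normalized return; seeds let workers rebuild eps_i without shipping weights.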
def evolution(num_agents, n_iterations=10000, max_t=2000, alpha = 0.01, gamma=1.0, std=0.1):
"""Deep Q-Learning.
Params
======
n_iterations (int): number of episodes used to train the agent
max_t (int): maximum number of timesteps per episode
alpha (float): iteration step
gamma (float): discount rate
population (int): size of population at each iteration
std (float): standard deviation of additive noise
"""
scores_deque = deque(maxlen=100)
scores = []
current_weights = []
sampled_rewards = {}
sampled_weights = {}
previous_reward = 0
start_time = time.time()
current_weights = std*np.random.randn(agent.get_weights_dim())
indices = [i for i in range(num_agents)]
rngs = [np.random.RandomState(i) for i in range(num_agents)]
for i_iteration in range(1, n_iterations+1):
seeds = [i for i in range(num_agents)]
sampled_rewards.clear()
sampled_weights.clear()
# with concurrent.futures.ThreadPoolExecutor(max_workers=num_agents) as executor:
# futures = list()
# for j in range(num_agents):
# seed = seeds[j]
# rng = rngs[j]
# futures.append(executor.submit(sample_weights, current_weights, seed, rng, std))
# for future in futures:
# return_value = future.result()
# sampled_weights.update(return_value)
# for i in range(num_agents):
# sampled_rewards.update({i: agent_test.evaluate(sampled_weights[i])})
#sampled_rewards = agent.evaluate(sampled_weights, num_agents, gamma, max_t)
current_weights = update_weights(current_weights, indices, alpha, std)
current_reward = agent_test.evaluate(current_weights)
scores_deque.append(current_reward)
scores.append(current_reward)
torch.save(agent.state_dict(), 'checkpoint.pth')
if i_iteration % 1 == 0:
print('Episode {}\tAverage Score: {:.2f}'.format(i_iteration, np.mean(scores_deque)))
if i_iteration % 100 == 0:
elapsed_time = time.time() - start_time
print("Duration: ", elapsed_time)
if np.mean(scores_deque)>=195.0:
print('\nEnvironment solved in {:d} iterations!\tAverage Score: {:.2f}'.format(i_iteration-100, np.mean(scores_deque)))
elapsed_time = time.time() - start_time
print("Training duration: ", elapsed_time)
break
return scores
scores = evolution(num_agents)
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.savefig('training_result.png') | [
"[email protected]"
] | |
967f4507e9be93893f9db9e8ab04d072e7c1c49c | 16ac02b8f427bd622af1564f1236e4913ed63521 | /Codes/Version 1.6/force_raised_gaussian.py | 003e679b1563108f7216fab5e0a3d0cd04424273 | [
"MIT"
] | permissive | gharib85/Brownian-dynamics-in-a-time-varying-force-field | 20660665747310e1201e8ca7d404acc15ec7a3bd | 1dce268fcc4f27e066be0ec0b511178cbc1437c5 | refs/heads/main | 2023-08-16T03:47:51.957137 | 2021-10-23T19:09:50 | 2021-10-23T19:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,593 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on March 6, 2021
@author: asif
"""
import numpy as np
import pylab as py
import matplotlib as plt
ro = 2e-6
tfinal = 12
xrange_limit = 30e-6 # Max and min of x axis range for plotting animation
zlow_limit = -10e-6
zhigh_limit = 30e-6
r_active = 0
n_order = 1 # Order of the Gaussian potential = 2n
w_well = 10e-6 # 1/e *max width of the potential well
A_well = 4000*1.38e-23*300 # well depth
def draw_geo(tm, ax_xy, ax_yz, ax_xz):
# March 7, 2021
# The flag_source_state variable is used to draw/erase the source geometry only once
# This is necessary to speed up the animation.
global flag_source_state_1 # Make this variable global so that the assigned value remains saved globally as t changes
global flag_source_state_2
    if 'flag_source_state_1' not in globals():
        flag_source_state_1 = 0 # initialize with OFF state; the global declaration above covers this name
        print('Defining global flag for source geometry \n')
    if 'flag_source_state_2' not in globals():
        flag_source_state_2 = 0 # initialize with OFF state; the global declaration above covers this name
        print('Defining global flag for source geometry \n')
# Draw static geometry (only once)
if flag_source_state_2 < 1:
py.sca(ax_yz)
substrate_yz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
py.gca().add_patch(substrate_yz)
py.sca(ax_xz)
substrate_xz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
py.gca().add_patch(substrate_xz)
py.sca(ax_xy)
substrate_xy = py.Rectangle((-xrange_limit*1e6, -xrange_limit*1e6),2*xrange_limit*1e6,2*xrange_limit*1e6,fc='#f9f9f9')
py.gca().add_patch(substrate_xy)
flag_source_state_2 = 1
# Draw source
if (tm > 1) & (tm < 8) & (flag_source_state_1 < 1):
patch_spot_xy = py.Circle((0, 0), 0.5*w_well*1e6, fc='#ff8c00',alpha = 0.8)
# patch_spot_yz = plt.patches.Arc((0, 0), 0.5*w_well*1e6, 0.5*w_well*1e6,0, 0, 180, fc='#ff8c00',alpha = 0.8)
py.sca(ax_xy)
py.gca().add_patch(patch_spot_xy)
# py.sca(ax_yz)
# py.gca().add_patch(patch_spot_yz)
flag_source_state_1 = 1
print('Drawing source\n')
# Erase source (draw a white circle)
if (tm > 8) & (flag_source_state_1 == 1):
patch_spot = py.Circle((0, 0), 0.51*w_well*1e6, fc='#f9f9f9',alpha = 1)
py.gca().add_patch(patch_spot)
print('Erasing source\n')
flag_source_state_1 = 0
# def draw_yz(tm):
# substrate_yz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
# py.gca().add_patch(substrate_yz)
# def draw_xz(tm):
# substrate_xz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
# py.gca().add_patch(substrate_xz)
# This is the function that is called from the main program
# Simplified raised-Gaussian force model
def force_profile(r_in, t):
Np = r_in[0,:].size
fm = np.zeros((3,Np))
r_norm = np.linalg.norm(r_in, axis = 0) + 1e-30
g = A_well*np.exp(-(r_norm/w_well)**(2*n_order))
if (t > 1) & (t<8):
fm[0,:] = -2*n_order*r_in[0,:]/(r_norm**2) * (r_norm/w_well)**(2*n_order) * g
fm[1,:] = -2*n_order*r_in[1,:]/(r_norm**2) * (r_norm/w_well)**(2*n_order) * g
fm[2,:] = -2*n_order*r_in[2,:]/(r_norm**2) * (r_norm/w_well)**(2*n_order) * g
# fm[:,2] = 0
# fm[:,3] = 0
# fm[:,4] = 0
# fm[:,5] = 0
# fm[:,6] = 0
return fm
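# Assumed potential behind this force: U(r) = -A_well * exp(-(|r|/w_well)^(2n)),
# so fm = -grad(U) points toward the well centre while the source is active;
# the 1e-30 added to r_norm guards against division by zero at the origin.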
def force_plot():
Np = 1
rin = np.zeros((3,Np))
r_in = np.tile(np.linspace(-xrange_limit,xrange_limit,200),(3,1))
F = force_profile(r_in,2)
py.figure()
py.plot(r_in[0,:]*1e6,F[0,:]*1e12, label = '$F_x$')
# py.plot(r_in[1,:]*1e6,F[1,:]*1e12,'.', label = '$F_y$')
# py.plot(r_in[2,:]*1e6,F[2,:]*1e12,'x', label = '$F_z$')
py.xlabel('$x$ ($\mu$m)')
py.ylabel('Force (pN)')
py.legend()
# force_plot()
# draw_source(9)
| [
"[email protected]"
] | |
ea71aaba6f75c0f5a70a4dda257718495ce1f587 | f350818ee4b5c5278f3a3331db27749c7e8af62c | /serverenv/bin/gunicorn_paster | b3878573bf0f6fbfb665c71a87040349051c37f0 | [] | no_license | AashrayAnand/table_list | 2195ce1bba4753b71220692cadb69dc0ad92d053 | 335bd4673afd7d9cf0b040bbb6334cb8fa3e98e0 | refs/heads/master | 2023-01-20T02:43:34.486581 | 2019-01-12T23:13:20 | 2019-01-12T23:13:20 | 153,882,251 | 1 | 0 | null | 2023-01-13T22:48:03 | 2018-10-20T07:46:42 | Python | UTF-8 | Python | false | false | 271 | #!/home/user_aashray/sites/live.uwpaphi.site/serverenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
db3e53c2289ed0be9042c4809d70477cec9c9646 | 4db0ecdaf1121c72a38feb3fe80c05467f8f9815 | /plot_roc_multi.py | 391ed21bd63ebb705d48b2bf95fdfa52c29306d3 | [] | no_license | zhiyong-zhou/roc_utils | 46d2f57e3cac7be3672a911873463720eae4177d | d747e72e0542b51b9cade370e2a3efad731740d0 | refs/heads/master | 2023-07-15T19:27:28.243152 | 2019-12-06T03:06:00 | 2019-12-06T03:06:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | """
plot_roc_multi:
Plots multiple ROC curves on the same graph (useful for comparison).
Note: for more detailed information on a single ROC see 'plot_roc'.
Author:
Keith Kenemer
"""
import os,sys
import numpy as np
import pickle
import matplotlib.pyplot as plt
from sklearn.metrics import auc
# process command line
if len(sys.argv) < 2:
print("\nUsage: plot_roc_multi <roc1.pkl> <roc2.pkl> ... " )
print("roc<x>.pkl: pickled (tpr,fpr,thr) tuple output from sklearn roc_curve()" )
print("\n")
exit()
# setup plot
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.xscale('log')
plt.title('ROC curve comparison')
# load & plot saved roc data
colors = ['b', 'g', 'r','m','c']
for k in range(1,len(sys.argv) ):
with open(sys.argv[k],"rb") as f:
roc = pickle.load(f, encoding = 'latin1')
fpr = roc[0]
tpr = roc[1]
plt.plot(fpr,tpr, color = colors[k%len(colors)], linewidth = 1, label = sys.argv[k] )
# show completed plot
plt.grid()
plt.legend(loc='lower right')
plt.show()
| [
"[email protected]"
] | |
553f569241527a84d22652ea7170576a0d8bb70b | c85be7ac89f702880dcc35cda13918bd01bc36a1 | /app.py | d84bdfbcfbd988b33444c67c6abe3b962bf96134 | [] | no_license | MarcoBurgos/MasterCryptoTeam | 1fb101cea08b6b99bc22b7835a4b7526b8cd4fa1 | c0a5c66cde61eefb5f173b46de7bfcb8fd79c6bd | refs/heads/master | 2022-12-13T23:47:18.031317 | 2019-06-16T05:30:14 | 2019-06-16T05:30:14 | 187,737,852 | 0 | 0 | null | 2022-12-08T01:45:51 | 2019-05-21T01:18:19 | Python | UTF-8 | Python | false | false | 81 | py | from mastercrypto import app
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
795299e5984a625559bf3332c4649ad94559164d | 52a15d4fabf68bf23a23799312ae40465764908c | /src/operation/unittest.py | ea1bd413fef2f9e5c4a43d478e9926b0e0835959 | [
"MIT",
"Apache-2.0"
] | permissive | jensl/critic | 2071a1b0600051967323df48f4d3a5656a5d2bb8 | c2d962b909ff7ef2f09bccbeb636333920b3659e | refs/heads/stable/1 | 2022-05-28T03:51:15.108944 | 2018-03-27T18:47:46 | 2018-03-29T15:08:30 | 6,430,552 | 224 | 36 | NOASSERTION | 2023-05-29T15:38:00 | 2012-10-28T18:26:04 | Python | UTF-8 | Python | false | false | 123 | py | def independence():
# Simply check that operation can be imported.
import operation
print "independence: ok"
| [
"[email protected]"
] | |
2a499fd7e7b8c5bbf2617bae35a047e99b8d6b08 | 637ec65429e817c6c12fc66bad299a9ff831ca3c | /supplier_management/supplier_management/doctype/supplier_product_info/supplier_product_info.py | a6f4b09f223a966bb66a2fb4d123987e1b8b7488 | [
"MIT"
] | permissive | ashish-greycube/supplier_management | 292ca4d956fdc8659e630ec9a8280d0b77037f25 | c6f32c383f3d6e9a459903652a42341beb7f8482 | refs/heads/master | 2020-09-02T22:05:23.001424 | 2020-01-08T07:03:05 | 2020-01-08T07:03:05 | 219,316,260 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, GreyCube Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SupplierProductInfo(Document):
pass
| [
"[email protected]"
] | |
aa3d3d73ed130154ffeca62917f6d42d463b92b8 | 3eb99709809a493c46a79171ef9774aa4261b59d | /脚本/llianli/cfapp_ei.py | 0ca3d8cf2ee5e729d313f8426799f897d4cd36f7 | [] | no_license | bingwin/tencent | c831a5b344f597a06c7a7b179d4f67d668198c90 | ea5dc5ff398d85cfdf4df056dc8b4064e66fb5fb | refs/heads/master | 2020-07-28T21:44:00.281933 | 2016-05-28T03:21:31 | 2016-05-28T03:21:31 | 209,548,176 | 1 | 0 | null | 2019-09-19T12:29:21 | 2019-09-19T12:29:21 | null | UTF-8 | Python | false | false | 6,718 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# ******************************************************************************
# Program name:  cfapp_ei.py
# Description:   daily event counts for CF app visits
# Input:         yyyymmdd, e.g. 20151208
# Target table:
# Source table:  teg_mta_intf::ieg_lol
# Author:        llianli
# Created:       2015-12-08
# Version:       v1.0
# Company:       tencent
# Modified by:
# Modified on:
# Reason:
# ******************************************************************************
#import system module
# main entry
import datetime
import time
def TDW_PL(tdw, argv=[]):
tdw.WriteLog("== begin ==")
tdw.WriteLog("== argv[0] = " + argv[0] + " ==")
sDate = argv[0]
tdw.WriteLog("== sDate = " + sDate + " ==")
tdw.WriteLog("== connect tdw ==")
sql = """use ieg_qt_community_app"""
res = tdw.execute(sql)
sql = """set hive.inputfiles.splitbylinenum=true"""
res = tdw.execute(sql)
sql = """set hive.inputfiles.line_num_per_split=1000000"""
res = tdw.execute(sql)
    ## create the table and write the data
sql = '''
CREATE TABLE IF NOT EXISTS tb_cf_app_ei
(
fdate INT,
id INT,
ei1 STRING,
ei2 STRING,
uin_mac STRING,
uin STRING,
pv BIGINT
)
'''
tdw.WriteLog(sql)
res = tdw.execute(sql)
sql = ''' DELETE FROM tb_cf_app_ei WHERE fdate = %s '''%(sDate)
tdw.WriteLog(sql)
res = tdw.execute(sql)
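    # deleting the partition for sDate first makes re-runs of the same day idempotent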
    ## insert the day's aggregated data into the table
sql = '''
INSERT TABLE tb_cf_app_ei
SELECT
%s AS fdate,
id,
ei1,
ei2,
uin_info,
uin,
COUNT(*) AS pv
FROM
(
SELECT
id,
'all' AS ei1,
case
when (id = 1100679031 and ei in ('情报站列表项点击') and get_json_object(kv,'$.type') not in ('图片','手机','论坛','电脑','游戏')) or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '资讯列表项')
then '情报站-资讯'
when (id = 1100679031 and ( ei in ('视频播放次数') or (ei = '资讯广告点击' and get_json_object(kv,'$.type') = '视频') ) ) or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '视频列表项')
then '情报站-视频'
when (id = 1100679031 and ei in ('情报站列表项点击') and get_json_object(kv,'$.type') ='图片') or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '图片列表项')
then '情报站-图片'
when (id = 1100679031 and ei in ('情报站列表项点击') and get_json_object(kv,'$.type') in ('手机','电脑','论坛','游戏')) or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '活动列表项')
then '情报站-活动'
when (id = 1100679031 and ei = '我模块点击次数' ) or (id = 1200679031 and ei = '情报站社区基地我TAB点击次数' and get_json_object(kv,'$.type') = '我') then '我-战绩'
when (id = 1100679031 and ei = '我_战绩资产记录展示次数' and get_json_object(kv,'$.tab') = '装备') or (id = 1200679031 and ei = '战绩资产记录TAB点击次数' and get_json_object(kv,'$.type') = '资产') then '我-资产'
when (id = 1100679031 and ei = '我_战绩资产记录展示次数' and get_json_object(kv,'$.tab') = '记录') or (id = 1200679031 and ei = '战绩资产记录TAB点击次数' and get_json_object(kv,'$.type') = '记录') then '我-记录'
when (id = 1100679031 and ei = '客态资料' ) then '客态资料'
when (id = 1100679031 and ei = '道聚城点击次数') or (id = 1200679031 and ei = '道具城点击') then '基地-道聚城'
when (id = 1100679031 and ei = '火线_视频点击次数') or (id = 1200679031 and ei = '火线时刻视频点击次数') then '基地-火线时刻'
when (id = 1100679031 and ei = '我的仓库点击' ) or (id = 1200679031 and ei = '我的仓库点击') then '基地-我的仓库'
when (id = 1100679031 and ei = '军火基地点击次' ) or (id = 1200679031 and ei = '军火基地点击次') then '基地-军火基地'
when (id = 1100679031 and ei= '基地WEB页面点击次数' and get_json_object(kv,'$.title') = '周边商城') then '基地-周边商城'
when (id = 1100679031 and ei = '竞猜大厅入口' ) or (id = 1200679031 and ei = '竞猜大厅入口点击次数') then '基地-赛事竞猜'
when (id = 1100679031 and ei = '火线百科点击次数' ) or (id = 1200679031 and ei = '火线百科点击') then '基地-火线百科'
when (id = 1100679031 and ei = '火线助手点击次数' ) or (id = 1200679031 and ei = '火线助手') then '基地-火线助手'
when (id = 1100679031 and ei = '我的任务点击次数' ) or (id = 1200679031 and ei = '我的任务点击') then '基地-我的任务'
when (id = 1100679031 and ei = '地图点位模块点击次数' ) or (id = 1200679031 and ei = '地图点图') then '基地-地图点位'
when (id = 1100679031 and ei in ('每天用户发的消息' ,'每天用户发的消息')) then '社区-聊天'
when (id = 1100679031 and ei = '社区_CF论坛点击次数' ) or (id = 1200679031 and ei = 'CF论坛点击') then '社区-CF论坛'
when (id = 1100679031 and ei = '社区_CF手游论坛点击次数' ) or (id = 1200679031 and ei = '点击CF手游论坛') then '社区-CF手游论坛'
when (id = 1100679031 and ei = '社区_兴趣部落点击次数' ) or (id = 1200679031 and ei = 'CF兴趣部落') then '社区-兴趣部落'
ELSE 'other'
end as ei2,
concat(ui,mc) AS uin_info,
get_json_object(kv,'$.uin') AS uin
FROM teg_mta_intf::ieg_lol WHERE sdate = %s AND id in (1100679031,1200679031)
)t1 WHERE ei1 != 'other' AND ei2 != 'other'
GROUP BY id,ei1,ei2,uin_info,uin
'''%(sDate,sDate)
tdw.WriteLog(sql)
res = tdw.execute(sql)
tdw.WriteLog("== end OK ==")
| [
"[email protected]"
] | |
be5e16b90dffe2f64db5a9e7ecca9866ccefcf3d | c7b5c4432525e97d7735472da9e85ce952b05bff | /build_run.py | b56c81262d3dece7932b0ae909eb24674856bee8 | [] | no_license | michaelposada/Docker-Builder-Python- | 99b8f7314c9426d0b0fa27e6f1120a1bf58448fb | 32f266bb04d67efe6bbac838c7a0b22e93582f2e | refs/heads/master | 2020-06-22T06:08:09.285155 | 2019-08-07T15:21:37 | 2019-08-07T15:21:37 | 197,652,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | ##Version Python 3.7.6
import os
import subprocess
print("I will now start to build these Images. Debian 7, Debian 8, Debian 9, Ubuntu 18.04 and Windows")
owd = os.getcwd()
os.chdir("Debian-7/")
os.system("docker build -t isa/debian7 . ")
os.system("docker run --cidfile debian7_id.cid isa/debian7")
cmd = os.popen("type debian7_id.cid").read()
os.system("del -f debian7_id.cid")
os.chdir(owd)
os.system("docker cp "+ cmd + ":./installSynApps/DEPLOYMENTS .")
os.chdir("Debian-8/")
os.system("docker build -t isa/debian8 . ")
os.system("docker run --cidfile debian8_id.cid isa/debian8")
cmd = os.popen("type debian8_id.cid").read()
os.system("del /f debian8_id.cid")
os.chdir(owd)
os.system("docker cp "+ cmd + ":./installSynApps/DEPLOYMENTS .")
os.chdir("Debian-9/")
os.system("docker build -t isa/debian9 . ")
os.system("docker run --cidfile debian9_id.cid isa/debian9")
cmd = os.popen("type debian9_id.cid").read()
os.system("del /f debian9_id.cid")
os.chdir(owd)
os.system("docker cp "+ cmd + ":./installSynApps/DEPLOYMENTS .")
os.chdir("Ubuntu/")
os.system("docker build -t isa/ubuntu_18.04 . ")
os.system("docker run --cidfile ubuntu18.04_id.cid isa/ubuntu_18.04")
cmd = os.popen("type ubuntu18.04_id.cid").read()
os.system("del /f ubuntu18.04_id.cid")
os.chdir(owd)
os.system("docker cp "+ cmd + ":./installSynApps/DEPLOYMENTS .")
| [
"[email protected]"
] | |
e06f845f010a2d4c14cb57449a23c59759b66c2c | 933ac63a0986d394a6d608c17447c5b522815505 | /koodi.py | a3e952c00e95527dfa651694849a38b081448df1 | [] | no_license | meemu/infovizu2019 | 3b506edc3f5069721b5691e997b79a59bbae3e5d | b3b63cf41e91388e7bc578c13c3593c5c2f44e97 | refs/heads/master | 2020-05-19T13:32:05.104369 | 2019-05-20T19:42:16 | 2019-05-20T19:42:16 | 185,042,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | import pandas as pd
from bokeh.io import show, output_file
from bokeh.plotting import figure, curdoc
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, HoverTool, Div, Select
from bokeh.sampledata.autompg import autompg_clean as df
from bokeh.palettes import Inferno
# Selecting the data from .csv
# downloaded from https://www.kaggle.com/russellyates88/suicide-rates-overview-1985-to-2016
master = pd.read_csv('master.csv')
columns = master[['country','year','sex','suicides_no']]
finland = columns[columns['country'] == 'Finland']
# Males by year
males = finland[finland['sex'] == 'male']
males_yearly = males.groupby('year').sum()
males_ds = ColumnDataSource(males_yearly)
# Females by year
females = finland[finland['sex'] == 'female']
females_yearly = females.groupby('year').sum()
females_ds = ColumnDataSource(females_yearly)
p = figure(plot_height=500,
plot_width=1000,
h_symmetry=True,
x_axis_label='Year',
x_axis_type='linear',
x_axis_location='below',
x_range=(1987, 2015),
y_axis_label='Number of suicides',
y_axis_type='linear',
y_axis_location='left',
y_range=(0, 1300),
title='Suicide Rates in Finland from 1987 to 2016',
title_location='above',
toolbar_location='below',
tools='save',
min_border_left=100,
min_border_top=50,
min_border_right=20)
p.grid.grid_line_color = None
p.line(source=males_ds, x='year', y='suicides_no', color='darkcyan',
line_width=2.5, legend='Males')
p.line(source=females_ds, x='year', y='suicides_no', color='deeppink',
line_width=2, legend='Females')
p.add_tools(HoverTool(
tooltips=[
( 'Year', '@year' ),
( 'Suicides', '@suicides_no' )
],
mode='vline'
))
text1 = Div(text="""Your <a href="https://en.wikipedia.org/wiki/HTML">HTML</a>-supported text is initialized with the <b>text</b> argument. The
remaining div arguments are <b>width</b> and <b>height</b>. For this example, those values
are <i>200</i> and <i>100</i> respectively.""",
width=1000, height=50)
text2 = Div(text="""Your <a href="https://en.wikipedia.org/wiki/HTML">HTML</a>-supported text is initialized with the <b>text</b> argument. The
remaining div arguments are <b>width</b> and <b>height</b>. For this example, those values
are <i>200</i> and <i>100</i> respectively.""",
width=1000, height=100)
output_file('index.html')
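# Assumed final step: bokeh only writes index.html on an explicit show()/save()
# call, so a minimal completion (the layout below is a guess) would be:
show(column(text1, p, text2))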
| [
"[email protected]"
] | |
71119c97936e2b9ffd38515d8759e7a17e791b0f | 3ec08df086670b0399a4455cea6e44076c389ad8 | /tests/components/nest/test_sensor_sdm.py | b1dddcd94949ee159b88ab815db62514d2431f6f | [
"Apache-2.0"
] | permissive | misialq/home-assistant | 1cb5c2a80e375c85cc0a36dc8c7a2734fb1940eb | af5fd74d6f936b159dd06cd19770110ea0dd3d7e | refs/heads/dev | 2023-08-18T09:05:01.747456 | 2023-01-28T06:14:36 | 2023-01-28T06:14:36 | 230,528,524 | 0 | 0 | Apache-2.0 | 2023-04-21T06:58:51 | 2019-12-27T22:45:56 | Python | UTF-8 | Python | false | false | 9,373 | py | """
Test for Nest sensors platform for the Smart Device Management API.
These tests fake out the subscriber/devicemanager, and are not using a real
pubsub subscriber.
"""
from typing import Any
from google_nest_sdm.event import EventMessage
import pytest
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
SensorDeviceClass,
SensorStateClass,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_FRIENDLY_NAME,
ATTR_UNIT_OF_MEASUREMENT,
PERCENTAGE,
STATE_UNAVAILABLE,
UnitOfTemperature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from .common import DEVICE_ID, CreateDevice, FakeSubscriber, PlatformSetup
@pytest.fixture
def platforms() -> list[str]:
"""Fixture to setup the platforms to test."""
return ["sensor"]
@pytest.fixture
def device_traits() -> dict[str, Any]:
"""Fixture that sets default traits used for devices."""
return {"sdm.devices.traits.Info": {"customName": "My Sensor"}}
async def test_thermostat_device(
hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup
):
"""Test a thermostat with temperature and humidity sensors."""
create_device.create(
{
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.1,
},
"sdm.devices.traits.Humidity": {
"ambientHumidityPercent": 35.0,
},
}
)
await setup_platform()
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is not None
assert temperature.state == "25.1"
assert (
temperature.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== UnitOfTemperature.CELSIUS
)
assert (
temperature.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE
)
assert temperature.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
assert temperature.attributes.get(ATTR_FRIENDLY_NAME) == "My Sensor Temperature"
humidity = hass.states.get("sensor.my_sensor_humidity")
assert humidity is not None
assert humidity.state == "35"
assert humidity.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert humidity.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.HUMIDITY
assert humidity.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
assert humidity.attributes.get(ATTR_FRIENDLY_NAME) == "My Sensor Humidity"
registry = er.async_get(hass)
entry = registry.async_get("sensor.my_sensor_temperature")
assert entry.unique_id == f"{DEVICE_ID}-temperature"
assert entry.domain == "sensor"
entry = registry.async_get("sensor.my_sensor_humidity")
assert entry.unique_id == f"{DEVICE_ID}-humidity"
assert entry.domain == "sensor"
device_registry = dr.async_get(hass)
device = device_registry.async_get(entry.device_id)
assert device.name == "My Sensor"
assert device.model == "Thermostat"
assert device.identifiers == {("nest", DEVICE_ID)}
async def test_thermostat_device_available(
hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup
):
"""Test a thermostat with temperature and humidity sensors that is Online."""
create_device.create(
{
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.1,
},
"sdm.devices.traits.Humidity": {
"ambientHumidityPercent": 35.0,
},
"sdm.devices.traits.Connectivity": {"status": "ONLINE"},
}
)
await setup_platform()
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is not None
assert temperature.state == "25.1"
humidity = hass.states.get("sensor.my_sensor_humidity")
assert humidity is not None
assert humidity.state == "35"
async def test_thermostat_device_unavailable(
hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup
):
"""Test a thermostat with temperature and humidity sensors that is Offline."""
create_device.create(
{
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.1,
},
"sdm.devices.traits.Humidity": {
"ambientHumidityPercent": 35.0,
},
"sdm.devices.traits.Connectivity": {"status": "OFFLINE"},
}
)
await setup_platform()
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is not None
assert temperature.state == STATE_UNAVAILABLE
humidity = hass.states.get("sensor.my_sensor_humidity")
assert humidity is not None
assert humidity.state == STATE_UNAVAILABLE
async def test_no_devices(hass: HomeAssistant, setup_platform: PlatformSetup):
"""Test no devices returned by the api."""
await setup_platform()
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is None
humidity = hass.states.get("sensor.my_sensor_humidity")
assert humidity is None
async def test_device_no_sensor_traits(
hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup
) -> None:
"""Test a device with applicable sensor traits."""
create_device.create({})
await setup_platform()
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is None
humidity = hass.states.get("sensor.my_sensor_humidity")
assert humidity is None
@pytest.mark.parametrize("device_traits", [{}]) # Disable default name
async def test_device_name_from_structure(
hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup
) -> None:
"""Test a device without a custom name, inferring name from structure."""
create_device.create(
raw_traits={
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.2,
},
},
raw_data={
"parentRelations": [
{"parent": "some-structure-id", "displayName": "Some Room"}
],
},
)
await setup_platform()
temperature = hass.states.get("sensor.some_room_temperature")
assert temperature is not None
assert temperature.state == "25.2"
async def test_event_updates_sensor(
hass: HomeAssistant,
subscriber: FakeSubscriber,
create_device: CreateDevice,
setup_platform: PlatformSetup,
) -> None:
"""Test a pubsub message received by subscriber to update temperature."""
create_device.create(
{
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.1,
},
}
)
await setup_platform()
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is not None
assert temperature.state == "25.1"
# Simulate a pubsub message received by the subscriber with a trait update
event = EventMessage(
{
"eventId": "some-event-id",
"timestamp": "2019-01-01T00:00:01Z",
"resourceUpdate": {
"name": DEVICE_ID,
"traits": {
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 26.2,
},
},
},
},
auth=None,
)
await subscriber.async_receive_event(event)
await hass.async_block_till_done() # Process dispatch/update signal
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is not None
assert temperature.state == "26.2"
@pytest.mark.parametrize("device_type", ["some-unknown-type"])
async def test_device_with_unknown_type(
hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup
) -> None:
"""Test a device without a custom name, inferring name from structure."""
create_device.create(
{
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.1,
},
}
)
await setup_platform()
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is not None
assert temperature.state == "25.1"
assert temperature.attributes.get(ATTR_FRIENDLY_NAME) == "My Sensor Temperature"
registry = er.async_get(hass)
entry = registry.async_get("sensor.my_sensor_temperature")
assert entry.unique_id == f"{DEVICE_ID}-temperature"
assert entry.domain == "sensor"
device_registry = dr.async_get(hass)
device = device_registry.async_get(entry.device_id)
assert device.name == "My Sensor"
assert device.model is None
assert device.identifiers == {("nest", DEVICE_ID)}
async def test_temperature_rounding(
hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup
) -> None:
"""Test the rounding of overly precise temperatures."""
create_device.create(
{
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.15678,
},
}
)
await setup_platform()
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature.state == "25.2"
| [
"[email protected]"
] | |
0d54aedefc1dae7703f514fdd10a16434c3e4068 | fd83602e34b8bde0f179753defa314859e9c5d9d | /OpenCV目标检测算法/object-detection_by_opencv.py | 640a07b6cb222222f745324ac2fb05530a401328 | [] | no_license | HIT-five/2020Picture_project | 5467ccca8b2752954429a10c8ff4444c357e24f2 | 4c17d88b0d0c1b7a652cb56ce18c0d9ae7808501 | refs/heads/master | 2022-11-16T00:51:02.041817 | 2020-07-13T19:14:51 | 2020-07-13T19:14:51 | 279,364,686 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
import copy
import math
cap = cv2.VideoCapture('./video1.avi')
fgbg = cv2.createBackgroundSubtractorMOG2()# create a Gaussian-mixture background-subtraction model
thresh = 200
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5))
while True:
ret,frame = cap.read()
if not ret:
break
    fgmask = fgbg.apply(frame) # get the foreground mask
# cv2.imshow('mask',fgmask)
# cv2.waitKey(300)
# cv2.destroyAllWindows()
    _,fgmask = cv2.threshold(fgmask,30,0xff,cv2.THRESH_OTSU) # binarize the foreground mask; foreground becomes white
    bgImage = fgbg.getBackgroundImage() # get the estimated background
fgmask = cv2.morphologyEx(fgmask,cv2.MORPH_OPEN,kernel)
# cv2.imshow('mask',fgmask)
# cv2.waitKey(300)
# cv2.destroyAllWindows()
    cnts,_ = cv2.findContours(fgmask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) # external-contour detection: box the white blobs, i.e. the moving objects; the hardest part is done above -- OpenCV wraps it in a single call
count = 0
for c in cnts:
area = cv2.contourArea(c)
if (area<thresh):
continue
count += 1
x,y,w,h = cv2.boundingRect(c)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0xff,0),2)
    print('Detected', count, 'moving object(s)', '\n')
cv2.imshow('frame',frame)
cv2.imshow('Background',bgImage)
key = cv2.waitKey(30)
if key==27:
break
| [
"[email protected]"
] | |
9271866dce787c1407b384f9a384b7bdc0d9bf89 | 286043f55a36f9d5844986b0eea58674b1c37353 | /src/stickNAUTA/__init__.py | 0ba48412d9b2f62c4f7b4de0f52e14ea7e0f8c5d | [
"MIT"
] | permissive | blacknoize404/stickNAUTA | a88526463b563e54a28ca815fbd8ab5791885be9 | 30a5d5a178435844dbafe9743fb7317deb71195c | refs/heads/main | 2023-04-15T23:15:39.461398 | 2021-04-11T19:00:58 | 2021-04-11T19:00:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | from .NautaSession import NautaSession
__version__ = '1.0.0'
__all__ = ['NautaSession']
__author__ = 'stickM4N [email protected]'
| [
"[email protected]"
] | |
698615984a24120282d332cfef57d98cdf075fb5 | 0c325cf7a68ef51067ed8db566d525a20de5b635 | /python/xlrd_and_xlwt/xlrd_test.py | ff5eb4eb3632e68644309cd097ce90b78ddb3c9c | [] | no_license | alinzel/NOTES | 2ab6aa1ef1d601a9ae8c0d23c0df2bca7e1aa241 | 3e0594641a605580e920d0b08a251fbc99f34e2f | refs/heads/master | 2023-01-08T22:48:30.762625 | 2020-01-17T09:14:47 | 2020-01-17T09:14:47 | 175,339,492 | 0 | 0 | null | 2022-12-27T15:01:19 | 2019-03-13T03:28:08 | HTML | UTF-8 | Python | false | false | 3,865 | py | # TODO xlrd -- a library for reading and formatting data from Excel files, whether xls or xlsx
import xlrd
# open the Excel file; returns a Book instance - <xlrd.book.Book object at 0x000001ED41180898>
excel = xlrd.open_workbook(r"./excel/2017年人员电子档案.xlsx") # r --> raw string, no escaping
# get the sheet names; returns a list of names - ['2017-6-22', '测试']
sheet_names = excel.sheet_names()
# get the sheet objects; returns a list of objects - [<xlrd.sheet.Sheet object at 0x0000023A57014CC0>, <xlrd.sheet.Sheet object at 0x0000023A57014CF8>]
sheets = excel.sheets()
# get the total number of sheets; returns a number - 2
sheet_num = excel.nsheets
# get a single sheet object
sheet_index = excel.sheet_by_index(0) # by index
sheet_name = excel.sheet_by_name("测试") # by name
# sheet-level information
name = sheet_index.name # sheet name
rows = sheet_index.nrows # number of rows
cols = sheet_index.ncols # number of columns
# batch access to cell data
row_value = sheet_index.row_values(2, 0, 4) # values of one row as a list; TODO params in order: row 2, from column 0 up to column 4
col_value = sheet_index.col_values(0, 0, 4)
row = sheet_index.row(2) # values and types of a row, slicing not supported - [text:'123', text:'456', text:'789', text:'147', text:'11111111', text:'258', text:'']
col = sheet_index.col(1)
slice_row = sheet_index.row_slice(2, 0, 4) # values and types of a row, slicing supported
slice_col = sheet_index.col_slice(0, 0, 4)
# access a specific cell
cell_value = sheet_index.cell(1,2).value # value at the 2nd row, 3rd column (zero-based cell(1, 2))
cell_value_ = sheet_index.cell_value(1,2)
# cell-name helpers
print(xlrd.cellname(0,1))
print(xlrd.cellnameabs(0,1))
print(xlrd.colname(8))
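# Expected output of the three helpers above (A1-style naming in xlrd):
# cellname(0,1) -> 'B1', cellnameabs(0,1) -> '$B$1', colname(8) -> 'I'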
# write the data into the database
import pymysql
# connect to the database
coon = pymysql.connect(
host="192.168.200.10",
db="test_zwl",
user="bdsdata",
password="357135",
port=3306
)
cur = coon.cursor()
# TODO query
# sql = "select * from file"
# cur.execute(sql)
# result = cur.fetchone()
# print(result)
# TODO insert data
row_num = sheet_index.nrows
col_num = sheet_index.ncols
# build one INSERT ... VALUES (),(),() statement for a batch insert, instead of inserting row by row
sql = "insert into file values"
for i in range(1,row_num): # iterate over the rows
    for j in range(0,col_num): # iterate over the columns
        item = sheet_index.cell_value(i, j) # value of the given cell
        # TODO empty values take two forms in the database: an empty string shows as blank, while NULL (which must not be quoted) shows as null
if item == "":
item = "Null"
value = str(item)
else:
value = '"' + str(item) + '"'
if i != row_num-1:
if j == 0 :
sql += "(" + str(i) + ","+ value + "," # TODO 插入的item 要用 ”“包起来,不然报错 1064,但是null不可以包
elif j == col_num-1:
sql += value + "),"
else:
sql += value + ","
else:
if j == 0 :
sql += "(" + str(i) + ","+ value + ","
elif j == col_num-1:
sql += value + ")"
else:
sql += value + ","
# break
# print(sql)
# try:
# cur.execute(sql)
# coon.commit() # TODO don't forget to commit
# except:
# coon.rollback()
value_list = []
for i in range(1,row_num):
row_v = sheet_index.row_values(i)
    row_v = [None if row == "" else row for row in row_v ] # None is stored as NULL in the database
value_list.append(row_v)
sql_many = "insert into file (name,area,department,job_state,phone,in_date,out_date)values(%s,%s,%s,%s,%s,%s,%s)"
try:
cur.executemany(sql_many,value_list)
    coon.commit() # TODO don't forget to commit
except Exception as e:
print(e)
coon.rollback()
cur.close()
coon.close() | [
"[email protected]"
] | |
6065c3b9f25c89d6d2bdd348455c1fbabbe4dbe6 | 138389945a62634e2ffc3db7e1e011c2059cbcd4 | /filterProject/filtergram.py | e92e4673fa31c2313423e8609936b2c33798b375 | [] | no_license | gomezquinteroD/GWC2019 | 1448db3c7ab847ca7aa6003d2be3f820e3f6529c | bb0a13b36235ccfcd3863f783d4bae6688203658 | refs/heads/master | 2020-06-19T07:37:20.812491 | 2019-08-08T16:09:47 | 2019-08-08T16:09:47 | 196,620,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | from filters import *
def main():
print("Image Editor: ")
filen = input("Give the file name: ")
imgs = load_img(filen)
print("Loading image...")
show_img(imgs)
print("Showing image...")
oImg = obamicon(imgs)
print("Obamicon activated...")
show_img(oImg)
main()
| [
"[email protected]"
] | |
a5fa4b57b83b141e36d8a93815e1e8d828b4aaba | 4772576b2f7601fb3295cec7756c832c250ffbc2 | /max.py | 9816e594e10a7248d892a993d1deccedf0c7b493 | [] | no_license | Dhineshkumarraveendiran/Guvi | db3a956025299fcb2fd06911cc322403c0027ca1 | 3904a980fa59dd079473a4d68c345ed5116160f1 | refs/heads/master | 2020-04-15T04:59:55.974890 | 2019-05-15T10:11:59 | 2019-05-15T10:11:59 | 164,405,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | #r
n = int(input())                      # n is read to match the input format but not used below
li = list(map(int, input().split()))  # the list of numbers
max1 = max(li)
print(max1)
| [
"[email protected]"
] | |
69837db1e369e1f6ffa70b28dd26005a8cc1e844 | d04e45045781d6c8c4f02f3b7f2c48418fbab330 | /fornumber.py | c50c2497a2210c63f7b53de79a954aca8558a1fd | [] | no_license | alcal3/CSS-301-Portfolio | 7825d99dc3841bfc8e611e11d6a624744ced2d8a | bec01c246b9f4a0662b64d0d134deca454a1f442 | refs/heads/master | 2020-05-09T20:31:04.642477 | 2019-06-11T21:07:32 | 2019-06-11T21:07:32 | 181,410,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | #aleks calderon
#4.23.2019
#iterative program part a
for x in range(1, 20):
#for loop goes through numbers 1-20
if x < 1:
x = 1
else:
x = x * x - 1
print(x)
| [
"[email protected]"
] | |
65f5d5d7db31e03fff05009390b6ac2b06cc7f29 | 5d58fa1d54855f18bad5688de4459af8d461c0ac | /plugins/callback/yaml.py | 40bc0191f254fdf8b7a04ea6c86e06ff50051353 | [] | no_license | nasirhm/general | b3b52f6e31be3de8bae0414da620d8cdbb2c2366 | 5ccd89933297f5587dae5cd114e24ea5c54f7ce5 | refs/heads/master | 2021-01-04T07:03:21.121102 | 2020-02-13T20:59:56 | 2020-02-13T20:59:56 | 240,440,187 | 1 | 0 | null | 2020-02-14T06:08:14 | 2020-02-14T06:08:13 | null | UTF-8 | Python | false | false | 4,855 | py | # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: yaml
type: stdout
short_description: yaml-ized Ansible screen output
description:
- Ansible output that can be quite a bit easier to read than the
default JSON formatting.
requirements:
- set as stdout in configuration
extends_documentation_fragment:
- default_callback
'''
import yaml
import json
import re
import string
import sys
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
from ansible.plugins.callback.default import CallbackModule as Default
# from http://stackoverflow.com/a/15423007/115478
def should_use_block(value):
"""Returns true if string should be in block format"""
for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
if c in value:
return True
return False
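# e.g. should_use_block(u"line1\nline2") is True, so the dumper below emits the
# readable '|' block style instead of a single escaped scalar.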
def my_represent_scalar(self, tag, value, style=None):
"""Uses block style for multi-line strings"""
if style is None:
if should_use_block(value):
style = '|'
# we care more about readable than accuracy, so...
# ...no trailing space
value = value.rstrip()
# ...and non-printable characters
value = ''.join(x for x in value if x in string.printable)
# ...tabs prevent blocks from expanding
value = value.expandtabs()
# ...and odd bits of whitespace
value = re.sub(r'[\x0b\x0c\r]', '', value)
# ...as does trailing space
value = re.sub(r' +\n', '\n', value)
else:
style = self.default_style
node = yaml.representer.ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
class CallbackModule(Default):
"""
Variation of the Default output which uses nicely readable YAML instead
of JSON for printing results.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'community.general.yaml'
def __init__(self):
super(CallbackModule, self).__init__()
yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
if result.get('_ansible_no_log', False):
return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result"))
# All result keys stating with _ansible_ are internal, so remove them from the result before we output anything.
abridged_result = strip_internal_keys(module_response_deepcopy(result))
# remove invocation unless specifically wanting it
if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
del abridged_result['invocation']
# remove diff information from screen output
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
# remove exception from screen output
if 'exception' in abridged_result:
del abridged_result['exception']
dumped = ''
# put changed and skipped into a header line
if 'changed' in abridged_result:
dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
del abridged_result['changed']
if 'skipped' in abridged_result:
dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
del abridged_result['skipped']
# if we already have stdout, we don't need stdout_lines
if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
abridged_result['stdout_lines'] = '<omitted>'
# if we already have stderr, we don't need stderr_lines
if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
abridged_result['stderr_lines'] = '<omitted>'
if abridged_result:
dumped += '\n'
dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
# indent by a couple of spaces
dumped = '\n '.join(dumped.split('\n')).rstrip()
return dumped
def _serialize_diff(self, diff):
return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
| [
"[email protected]"
] | |
56692b2d8d4fdb1f80704b8bf94e317d10fe573e | de8d784448e30cd93a9745d13a189c391df514c1 | /DIP/dip_manager/migrations/0004_auto_20190601_1923.py | 3c6f2953863219c791cf0764a4221dc5b9aac38b | [] | no_license | Blackstee/DataInPocket | 3c30f64598195f75cb7a843000eaeb709d5f2f91 | 28ebad40d615f76bee32b9e3c6c6410b2a505ca7 | refs/heads/master | 2022-04-26T23:21:34.125211 | 2019-06-04T22:53:49 | 2019-06-04T22:53:49 | 187,661,939 | 1 | 0 | null | 2022-04-22T21:25:41 | 2019-05-20T14:57:38 | CSS | UTF-8 | Python | false | false | 1,787 | py | # Generated by Django 2.2.1 on 2019-06-01 16:23
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('dip_manager', '0003_auto_20190531_2334'),
]
operations = [
migrations.RenameField(
model_name='pic_task',
old_name='comment',
new_name='task',
),
migrations.AlterField(
model_name='change',
name='date_post',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 1, 16, 23, 8, 19211, tzinfo=utc), null=True),
),
migrations.AlterField(
model_name='comment',
name='date_post',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 1, 16, 23, 8, 16864, tzinfo=utc), null=True),
),
migrations.AlterField(
model_name='comment',
name='date_update',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 1, 16, 23, 8, 16912, tzinfo=utc), null=True),
),
migrations.AlterField(
model_name='suggestion',
name='date_post',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 1, 16, 23, 8, 18073, tzinfo=utc), null=True),
),
migrations.AlterField(
model_name='task',
name='date_start',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 1, 16, 23, 8, 13497, tzinfo=utc), null=True),
),
migrations.AlterField(
model_name='task',
name='date_update',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 1, 16, 23, 8, 14614, tzinfo=utc), null=True),
),
]
| [
"[email protected]"
] | |
4fbe70fbf88650d84fb87c57199e97908cac72f3 | 1bf7673846aedb5beed2d065f971f2985f70df1b | /lib/stashcache_tester/output/githubOutput.py | e82f08f5b95658cef25fec44fc13f3b8251cb8d3 | [] | no_license | StashCache/stashcache-tester | 31ee90945186821f9bb0979c7bee942037ae05e7 | 5031d294050e9c6419c360e804654850efcfa32c | refs/heads/master | 2020-12-25T14:12:41.392207 | 2017-02-23T17:55:51 | 2017-02-23T17:55:51 | 40,491,284 | 0 | 2 | null | 2017-02-23T17:55:52 | 2015-08-10T15:51:17 | Python | UTF-8 | Python | false | false | 6,685 | py |
import logging
import json
import time
import shutil
import os
import sys
from tempfile import NamedTemporaryFile
from stashcache_tester.output.generalOutput import GeneralOutput
from stashcache_tester.util.Configuration import get_option
from stashcache_tester.util.ExternalCommands import RunExternal
class GithubOutput(GeneralOutput):
"""
:param dict sitesData: Dictionary described in :ref:`sitesData <sitesData-label>`.
This class summarizes and uploads the download data to a github account. The data will be stored in a file named ``data.json`` in the git repo under the directory in the configuration. The format of ``data.json`` is::
{
"20150911": [
{
"average": 364.76526180827,
"name": "Tusker"
},
{
"average": 75.99734924610296,
"name": "UCSDT2"
},
...
],
"20150913": [
{
"average": 239.02169168535966,
"name": "Tusker"
},
...
],
...
}
Github output requires an SSH key to be added to the github repository which is pointed to by the `repo` configuration option.
Github output requires additional configuration options in the main configuration in the section `[github]`. An example configuration could be::
[github]
repo = StashCache/stashcache.github.io.git
branch = master
directory = data
ssh_key = /home/user/.ssh/id_rsa
The configuration is:
repo
The git repo to commit the data to.
branch
The branch to install repo.
directory
The directory to put the data summarized files into.
maxdays
The maximum number of days to keep data. Default=30
ssh_key
Path to SSH key to use when checking out and pushing to the repository.
"""
git_ssh_contents = """#!/bin/sh
exec ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $SSH_KEY_FILE "$@"
"""
def __init__(self, sitesData):
GeneralOutput.__init__(self, sitesData)
def _get_option(self, option, default = None):
return get_option(option, section="github", default=default)
def _summarize_data(self, sitesData):
summarized = []
# Average download time per site.
for site in sitesData:
cur = {}
cur['name'] = site
siteTimes = sitesData[site]
total_runtime = 0
failures = 0
caches = {}
for run in siteTimes:
# Initialize the cache structure
cache = run['cache']
if cache not in caches:
caches[cache] = {}
caches[cache]['runs'] = 0
caches[cache]['totalRuntime'] = 0
caches[cache]['failures'] = 0
if run['success'] is True:
total_runtime += float(run['duration'])
caches[cache]['totalRuntime'] += float(run['duration'])
caches[cache]['runs'] += 1
else:
caches[cache]['failures'] += 1
failures += 1
testsize = get_option("raw_testsize")
if total_runtime == 0:
cur['average'] = 0
for cache in caches.keys():
caches[cache]['average'] = 0
else:
cur['average'] = (float(testsize*8) / (1024*1024)) / (total_runtime / len(siteTimes))
for cache in caches.keys():
caches[cache]['average'] = (float(testsize*8) / (1024*1024)) / (caches[cache]['totalRuntime'] / caches[cache]['runs'])
cur['caches'] = caches
cur['failures'] = failures
summarized.append(cur)
# Should we do violin plot?
#summarized = sitesData
return summarized
def startProcessing(self):
"""
Begin summarizing the data.
"""
summarized_data = self._summarize_data(self.sitesData)
logging.debug("Creating temporary file for GIT_SSH")
tmpfile = NamedTemporaryFile(delete=False)
tmpfile.write(self.git_ssh_contents)
git_sh_loc = tmpfile.name
logging.debug("Wrote contents of git_ssh_contents to %s" % git_sh_loc)
tmpfile.close()
import stat
os.chmod(git_sh_loc, stat.S_IXUSR | stat.S_IRUSR)
os.environ["GIT_SSH"] = git_sh_loc
# Download the git repo
git_repo = self._get_option("repo")
git_branch = self._get_option("branch")
key_file = self._get_option("ssh_key")
output_dir = self._get_option("directory")
os.environ["SSH_KEY_FILE"] = key_file
        RunExternal("git clone --quiet --branch %s git@github.com:%s output_git" % (git_branch, git_repo))
# Write summarized data to the data file
data_filename = os.path.join("output_git", output_dir, "data.json")
if not os.path.exists(data_filename):
logging.error("Data file does not exist, bailing")
sys.exit(1)
with open(data_filename) as data_file:
data = json.load(data_file)
# Truncate the data to the latest `maxdays` days.
maxdays = self._get_option("maxdays", 30)
# Get and sort the keys
sorted_list = data.keys()
sorted_list.sort()
# Discard the last `maxdays` days (looking for what we need to delete)
to_delete = sorted_list[:-int(maxdays)]
for key in to_delete:
logging.debug("Removing data from %s" % key)
data.pop(key, None)
# Write today's summarized data
todays_key = time.strftime("%Y%m%d")
data[todays_key] = summarized_data
with open(data_filename, 'w') as data_file:
json.dump(data, data_file)
# Commit to git repo
RunExternal("cd output_git; git add -f .")
RunExternal("cd output_git; git commit -m \"Adding data for %s\"" % todays_key)
RunExternal("cd output_git; git push -fq origin %s" % git_branch)
shutil.rmtree("output_git")
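
if __name__ == "__main__":
    # Minimal consumer sketch (not part of the class above) showing how the
    # data.json structure documented in the GithubOutput docstring can be
    # read back. The inline JSON is illustrative sample data, not real runs.
    sample = json.loads('{"20150911": [{"name": "Tusker", "average": 364.77}]}')
    for day, sites in sorted(sample.items()):
        for site in sites:
            print("%s %s %.2f Mbit/s" % (day, site["name"], site["average"]))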
| [
"[email protected]"
] | |
2cc66030c8992eb883ca25b0c0639e0625254c5d | 2d1a9cd436a2c5de70e0bf5a93fbd31970a1ff6b | /setup.py | e56c352a506419ddec00873e42ae9ebd5d02ff11 | [
"MIT"
] | permissive | tomstitt/nb_connect_existing | 052cc89cbbf0fab35d2feeef388533c76b4ebb01 | 44de6d37d28287f8cdce5dd0fcef1d4cba22e2d9 | refs/heads/master | 2020-04-08T14:00:53.402336 | 2019-10-21T23:51:16 | 2019-10-21T23:51:16 | 159,418,217 | 0 | 0 | MIT | 2019-02-11T17:50:43 | 2018-11-28T00:11:47 | Python | UTF-8 | Python | false | false | 552 | py | import setuptools
distname = "nb_connect_existing"
setuptools.setup(
name=distname,
packages=[distname],
include_package_data=True,
data_files=[
("share/jupyter/nbextensions/%s" % distname, [
"%s/static/index.js" % distname,
]),
("etc/jupyter/nbconfig/tree.d", [
"etc/jupyter/nbconfig/tree.d/%s.json" % distname
]),
("etc/jupyter/jupyter_notebook_config.d", [
"etc/jupyter/jupyter_notebook_config.d/%s.json" % distname
])
],
zip_safe=False
)
| [
"[email protected]"
] | |
c1dd30a27620a1d021d4dd38f8ff19892a4ca76c | a3160604e980796bfc93ca0fbc647f9767022a14 | /lstm.py | 6d3df3d34441194159fea72d263e1008fb1ec197 | [
"MIT"
] | permissive | JayeshKriplani/Sentiment-Analysis-on-Tweets | 033b67fa902d833116a2deb512ea904b051dbd09 | 1a507c3a18f749f5fa615f72a85ff348d96a06aa | refs/heads/master | 2023-08-18T19:05:57.255228 | 2021-10-07T17:53:21 | 2021-10-07T17:53:21 | 300,169,736 | 0 | 5 | MIT | 2020-10-15T10:35:03 | 2020-10-01T06:22:45 | Jupyter Notebook | UTF-8 | Python | false | false | 4,535 | py | import numpy as np
import sys
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.layers import LSTM
import utils
from keras.preprocessing.sequence import pad_sequences
# Performs classification using LSTM network.
FREQ_DIST_FILE = '../train-processed-freqdist.pkl'
BI_FREQ_DIST_FILE = '../train-processed-freqdist-bi.pkl'
TRAIN_PROCESSED_FILE = '../train-processed.csv'
TEST_PROCESSED_FILE = '../test-processed.csv'
GLOVE_FILE = './dataset/glove-seeds.txt'
dim = 200
def get_glove_vectors(vocab):
print 'Looking for GLOVE vectors'
glove_vectors = {}
found = 0
with open(GLOVE_FILE, 'r') as glove_file:
for i, line in enumerate(glove_file):
utils.write_status(i + 1, 0)
tokens = line.split()
word = tokens[0]
if vocab.get(word):
vector = [float(e) for e in tokens[1:]]
glove_vectors[word] = np.array(vector)
found += 1
print '\n'
print 'Found %d words in GLOVE' % found
return glove_vectors
def get_feature_vector(tweet):
words = tweet.split()
feature_vector = []
for i in range(len(words) - 1):
word = words[i]
if vocab.get(word) is not None:
feature_vector.append(vocab.get(word))
if len(words) >= 1:
if vocab.get(words[-1]) is not None:
feature_vector.append(vocab.get(words[-1]))
return feature_vector
def process_tweets(csv_file, test_file=True):
tweets = []
labels = []
print 'Generating feature vectors'
with open(csv_file, 'r') as csv:
lines = csv.readlines()
total = len(lines)
for i, line in enumerate(lines):
if test_file:
tweet_id, tweet = line.split(',')
else:
tweet_id, sentiment, tweet = line.split(',')
feature_vector = get_feature_vector(tweet)
if test_file:
tweets.append(feature_vector)
else:
tweets.append(feature_vector)
labels.append(int(sentiment))
utils.write_status(i + 1, total)
print '\n'
return tweets, np.array(labels)
if __name__ == '__main__':
train = len(sys.argv) == 1
np.random.seed(1337)
vocab_size = 90000
batch_size = 500
max_length = 40
filters = 600
kernel_size = 3
vocab = utils.top_n_words(FREQ_DIST_FILE, vocab_size, shift=1)
glove_vectors = get_glove_vectors(vocab)
tweets, labels = process_tweets(TRAIN_PROCESSED_FILE, test_file=False)
embedding_matrix = np.random.randn(vocab_size + 1, dim) * 0.01
for word, i in vocab.items():
glove_vector = glove_vectors.get(word)
if glove_vector is not None:
embedding_matrix[i] = glove_vector
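    # Rows for words that have no GLOVE vector keep the small random
    # initialisation above (randn * 0.01), so out-of-vocabulary words still
    # get a trainable embedding rather than zeros.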
tweets = pad_sequences(tweets, maxlen=max_length, padding='post')
shuffled_indices = np.random.permutation(tweets.shape[0])
tweets = tweets[shuffled_indices]
labels = labels[shuffled_indices]
if train:
model = Sequential()
model.add(Embedding(vocab_size + 1, dim, weights=[embedding_matrix], input_length=max_length))
model.add(Dropout(0.4))
model.add(LSTM(128))
model.add(Dense(64))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
filepath = "./models/lstm-{epoch:02d}-{loss:0.3f}-{acc:0.3f}-{val_loss:0.3f}-{val_acc:0.3f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor="loss", verbose=1, save_best_only=True, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, min_lr=0.000001)
print model.summary()
model.fit(tweets, labels, batch_size=128, epochs=5, validation_split=0.1, shuffle=True, callbacks=[checkpoint, reduce_lr])
else:
model = load_model(sys.argv[1])
print model.summary()
test_tweets, _ = process_tweets(TEST_PROCESSED_FILE, test_file=True)
test_tweets = pad_sequences(test_tweets, maxlen=max_length, padding='post')
predictions = model.predict(test_tweets, batch_size=128, verbose=1)
results = zip(map(str, range(len(test_tweets))), np.round(predictions[:, 0]).astype(int))
utils.save_results_to_csv(results, 'lstm.csv')
| [
"[email protected]"
] | |
2066351bd270fca74bcedff272d08e9570fd7078 | 94b820091cf25a70ae1d4fe416cabb23bf52af16 | /2016/day05/solution.py | e60db96bb69afb2808089083a053027e6ad567e6 | [] | no_license | JIghtuse/adventofcode-solutions | 049fe0084591b205c531d3dd060b76d149989d30 | ae4b06a8a5572d9d08ade6e02f9babbd582b19ac | refs/heads/master | 2021-05-04T10:04:01.398777 | 2017-12-05T16:39:58 | 2017-12-05T16:39:58 | 49,514,418 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | #!/usr/bin/python3
import hashlib
DOOR_ID = "abbhdwsy"
def solve_first():
index = 0
digest = ""
password = ""
while len(password) != 8:
while not digest.startswith("00000"):
digest = hashlib.md5((DOOR_ID + str(index)).encode("utf-8")).hexdigest()
index += 1
password += digest[5]
print(password)
digest = ""
return password
def solve_second():
index = 0
digest = ""
password = " " * 8
while password.find(" ") != -1:
while not digest.startswith("00000"):
s = DOOR_ID + str(index)
digest = hashlib.md5(s.encode("utf-8")).hexdigest()
index += 1
try:
pos = int(digest[5])
if pos < 8 and password[pos] == ' ':
password = password[:pos] + digest[6] + password[pos+1:]
print("'{}'".format(password))
except ValueError:
continue
        finally:
            # reset the digest so the inner loop searches again; the index
            # was already advanced inside the inner loop, so bumping it here
            # as well would skip a candidate
            digest = ""
return password
print(solve_first())
print(solve_second())
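
# Optional speed-up sketch (an assumption, not part of the original solution):
# hashlib digest objects support .copy(), so the constant DOOR_ID prefix can
# be hashed once and cloned for every candidate index instead of re-hashing
# the full string each time.
def digest_for(index, _base=hashlib.md5(DOOR_ID.encode("utf-8"))):
    h = _base.copy()
    h.update(str(index).encode("utf-8"))
    return h.hexdigest()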
| [
"[email protected]"
] | |
5c49e0ec04fe15cf08be854625cc496120e28c5f | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stubs/keyboard/keyboard/_keyboard_event.pyi | 9c511fdccf59bc88a0fc4b133c00ab9036b835c7 | [
"MIT",
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 827 | pyi | from typing_extensions import Literal
from ._canonical_names import canonical_names as canonical_names, normalize_name as normalize_name
KEY_DOWN: Literal["down"]
KEY_UP: Literal["up"]
class KeyboardEvent:
event_type: Literal["down", "up"] | None
scan_code: int
name: str | None
time: float | None
device: str | None
modifiers: tuple[str, ...] | None
is_keypad: bool | None
def __init__(
self,
event_type: Literal["down", "up"] | None,
scan_code: int,
name: str | None = ...,
time: float | None = ...,
device: str | None = ...,
modifiers: tuple[str, ...] | None = ...,
is_keypad: bool | None = ...,
) -> None: ...
def to_json(self, ensure_ascii: bool = ...) -> str: ...
def __eq__(self, other: object) -> bool: ...
| [
"[email protected]"
] | |
6f3afb80f98a418d11eb0cdf9a702c511fa7ef73 | f56c7699e814e9ea6b699a9636671227b6021df6 | /codeforces/myPractice/dimaAndFriends.py | 8228d18512b2dd8b41b65dc732a310ef63c68614 | [] | no_license | rishiraj52/Competitive-Programming-CP | 0646d789f1f3b87ea6866bb1fec2f0efffa71a1a | 091b7df0232ae8bf1f42bfce6136cb6e4d6d3844 | refs/heads/main | 2023-08-15T06:59:01.139406 | 2021-10-09T11:41:37 | 2021-10-09T11:41:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | n=int(input())
l=list(map(int,input().split()))
s=sum(l)
# s=s%(n+1)
tp=[]
for i in range(1,6):
tp.append(i+s)
c=0
for i in range(len(tp)):
tp[i]=tp[i]%(n+1)
if(tp[i]!=1):
c+=1
print(c)
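
# Why the check works (sketch): there are n + 1 people in the circle and the
# count starts at Dima himself, so after `total` fingers the count lands back
# on Dima exactly when total % (n + 1) == 1. Each of Dima's five choices whose
# total avoids that residue is counted as a safe way to show fingers.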
| [
"[email protected]"
] | |
233d0ec4819d640232d4c681a2454a1e0e5966e1 | 23d25497d30accc7125f6068ad7c55ebcbea0160 | /Python/10828.py | a809aac64a520ee155f389f3eeaf52107078f583 | [] | no_license | ParkJeongseop/Algorithm | 460689e064529d65e8612493a5d338305ec6311e | 388d092ee8b07b7ea76e720053c782790563515b | refs/heads/master | 2023-08-30T23:19:46.029510 | 2023-08-09T11:08:56 | 2023-08-09T11:08:56 | 149,557,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | import sys; input = lambda:sys.stdin.readline().rstrip()
n = int(input())
a = []
for _ in range(n):
cmd = input().split()
if cmd[0] == 'push':
a.append(cmd[1])
elif cmd[0] == 'pop':
if a:
print(a.pop())
else:
print(-1)
elif cmd[0] == 'size':
print(len(a))
elif cmd[0] == 'empty':
print(0 if len(a) else 1)
elif cmd[0] == 'top':
if a:
print(a[-1])
else:
print(-1) | [
"[email protected]"
] | |
35bbad1c68e22cd2e649d0005986964a8941d3a7 | 63cf26ee97b2c2d97aa951c319b75c340cd839ec | /catalog/views.py | 796da70cfaac9841713dd34fb4ea83554e3a3e4a | [] | no_license | Junlli/django_local_library | 4a7a2a155380251e8eff42347cc5a195a30e6adf | c54541bf922d0bb180228263038f505e4f54bab6 | refs/heads/master | 2020-03-27T22:21:59.610300 | 2018-11-28T16:06:18 | 2018-11-28T16:06:18 | 147,224,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,390 | py | from django.shortcuts import render
# Create your views here.
from .models import Book, Author, BookInstance, Genre
from django.core.paginator import Paginator
from django.shortcuts import render
def index(request, page_number = '1'):
"""
View function for home page of site.
"""
# Generate counts of some of the main objects
    num_books = Book.objects.all().count()  # get a count of the Book records
num_instances = BookInstance.objects.all().count()
# Available books (status = 'a')
num_instances_available = BookInstance.objects.filter(status__exact='a').count()
num_authors = Author.objects.count() # The 'all()' is implied by default.
num_genres = Genre.objects.count()
num_visits = request.session.get('num_visits', 0)
request.session['num_visits'] = num_visits+1
book_list = Book.objects.all()
p = Paginator(book_list, 20)
current_page = p.page(int(page_number))
# Render the HTML template index.html with the data in the context variable
return render(
request,
'index.html',
context={'num_books': num_books, 'num_instances': num_instances,
'num_instances_available': num_instances_available, 'num_authors': num_authors,
'num_genres': num_genres, 'num_visits': num_visits, 'book_list': book_list, 'pages': p,
'current_page': current_page})
from django.views import generic
class BookListView(generic.ListView):
model = Book
# context_object_name = 'book_list' # your own name for the list as a template variable
# queryset = Book.objects.filter(title__icontains='war')[:5] # Get 5 books containing the title war
# template_name = 'books/my_arbitrary_template_name_list.html' # Specify your own template name/location
    paginate_by = 10  # paginate once there are more than 10 records
# def get_context_data(self, **kwargs):
# # Call the base implementation first to get the context
# context = super(BookListView, self).get_context_data(**kwargs)
# # Create any data and add it to the context
# context['some_data'] = 'This is just some data'
# return context
class BookDetailView(generic.DetailView):
model = Book
# def book_detail_view(request, pk):
# try:
# book_id = Book.objects.get(pk=pk)
# except Book.DoesNotExist:
# raise Http404("Book does not exist")
#
# # book_id=get_object_or_404(Book, pk=pk)
#
# return render(
# request,
# 'catalog/book_detail.html',
# context={'book': book_id, }
# )
class AuthorListView(generic.ListView):
model = Author
paginate_by = 10
class AuthorDetailView(generic.DetailView):
model = Author
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import permission_required
class LoanedBooksByUserListView(LoginRequiredMixin, generic.ListView):
"""
Generic class-based view listing books on loan to current user.
"""
model = BookInstance
template_name = 'catalog/bookinstance_list_borrowed_user.html'
paginate_by = 10
def get_queryset(self):
return BookInstance.objects.filter(borrower=self.request.user).filter(status__exact='o').order_by('due_back')
from django.contrib.auth.mixins import PermissionRequiredMixin
class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):
"""
Generic class-based view listing all books on loan. Only visible to users with can_mark_returned permission.
"""
model = BookInstance
permission_required = 'catalog.can_mark_returned'
template_name = 'catalog/bookinstance_list_borrowed_all.html'
paginate_by = 10
def get_queryset(self):
return BookInstance.objects.filter(status__exact='o').order_by('due_back')
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
import datetime
from django.contrib.auth.decorators import permission_required
from .forms import RenewBookForm
@permission_required('catalog.can_mark_returned')
def renew_book_librarian(request, pk):
    book_inst = get_object_or_404(BookInstance, pk=pk)  # return the requested object from the model, or raise 404 if it does not exist
# If this is a POST request then process the Form data
if request.method == 'POST':
# Create a form instance and populate it with data from the request (binding):
form = RenewBookForm(request.POST)
# Check if the form is valid:
if form.is_valid():
# process the data in form.cleaned_data as required (here we just write it to the model due_back field)
book_inst.due_back = form.cleaned_data['renewal_date']
book_inst.save()
# redirect to a new URL:
            return HttpResponseRedirect(reverse('all-borrowed'))  # redirect to the given URL
# If this is a GET (or any other method) create the default form.
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=3)
form = RenewBookForm(initial={'renewal_date': proposed_renewal_date,})
return render(request, 'catalog/book_renew_librarian.html', {'form': form, 'bookinst': book_inst})
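
# For readability, a commented sketch of what the imported RenewBookForm is
# expected to provide (an assumption -- the real definition lives in .forms,
# with forms and ValidationError imported there):
#
# class RenewBookForm(forms.Form):
#     renewal_date = forms.DateField(help_text="Enter a date between now and 4 weeks (default 3).")
#
#     def clean_renewal_date(self):
#         data = self.cleaned_data['renewal_date']
#         if data < datetime.date.today():
#             raise ValidationError('Invalid date - renewal in past')
#         if data > datetime.date.today() + datetime.timedelta(weeks=4):
#             raise ValidationError('Invalid date - renewal more than 4 weeks ahead')
#         return data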
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from .models import Author
from django.contrib.auth.mixins import PermissionRequiredMixin
class AuthorCreate(PermissionRequiredMixin, CreateView):
    permission_required = ('catalog.can_mark_returned', 'catalog.can_edit')
model = Author
    fields = '__all__'  # include all fields
initial = {'date_of_death': ''}
class AuthorUpdate(PermissionRequiredMixin, UpdateView):
    permission_required = ('catalog.can_mark_returned', 'catalog.can_edit')
model = Author
fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death']
class AuthorDelete(PermissionRequiredMixin, DeleteView):
    permission_required = ('catalog.can_mark_returned', 'catalog.can_edit')
model = Author
success_url = reverse_lazy('authors')
    # After deleting an author, redirect to our author list. reverse_lazy() is
    # a lazily-executed version of reverse(), used here because we are
    # supplying a URL to a class-based view attribute.
from .models import Book
class BookCreate(PermissionRequiredMixin, CreateView):
    permission_required = ('catalog.can_mark_returned', 'catalog.can_edit')
model = Book
fields = '__all__'
class BookUpdate(PermissionRequiredMixin, UpdateView):
    permission_required = ('catalog.can_mark_returned', 'catalog.can_edit')
model = Book
fields = '__all__'
class BookDelete(PermissionRequiredMixin, DeleteView):
    permission_required = ('catalog.can_mark_returned', 'catalog.can_edit')
model = Book
success_url = reverse_lazy('books')
class Xiaoshuo(generic.ListView):
model = Book
template_name = 'catalog/xiaoshuowenxue.html' # Specify your own template name/location
class Zhuanji(generic.ListView):
model = Book
template_name = 'catalog/mingrenzhuanji.html' # Specify your own template name/location
class Xuexi(generic.ListView):
model = Book
template_name = 'catalog/xuexijiaoyu.html' # Specify your own template name/location
class Lizhi(generic.ListView):
model = Book
template_name = 'catalog/chenggonglizhi.html' # Specify your own template name/location
class Ertong(generic.ListView):
model = Book
template_name = 'catalog/ertongduwu.html' # Specify your own template name/location
class Shenghuo(generic.ListView):
model = Book
template_name = 'catalog/shenghuoshishang.html' # Specify your own template name/location
class Renwen(generic.ListView):
model = Book
template_name = 'catalog/renwensheke.html' # Specify your own template name/location
class Xinli(generic.ListView):
model = Book
template_name = 'catalog/xinlibaike.html' # Specify your own template name/location | [
"[email protected]"
] | |
4144585f59160e7268a01a9b954689f44dcc5460 | 44a6e88da453a2e368b014e403843b0c955f21f4 | /utils/make_mock_solid_dir.py | 49e00504ae9b25d4b9a7a94ae096e077cf8d7ffc | [
"Artistic-2.0"
] | permissive | golharam/genomics | a26b1f9366203ec059cc2e49281909bfc16e6ab4 | ca0c7c239b0f04353e2f2fa897db9c24a1211596 | refs/heads/master | 2020-08-06T10:28:21.604129 | 2019-09-27T07:51:41 | 2019-09-27T07:51:41 | 212,943,378 | 0 | 0 | Artistic-2.0 | 2019-10-05T04:25:24 | 2019-10-05T04:25:23 | null | UTF-8 | Python | false | false | 1,728 | py | #!/usr/bin/env python
#
# make_mock_solid_dir.py: make mock SOLiD directory for test purposes
# Copyright (C) University of Manchester 2011 Peter Briggs
#
########################################################################
#
# make_mock_solid_dir.py
#
#########################################################################
"""make_mock_solid_dir.py
Makes a mock SOLiD run directory with run_definition and barcode statistic
files plus mock csfasta and qual files, which can be used to test other
programs and scrips with.
It uses the TestUtils class from the SolidData module to build and populate
the mock directory structure.
Usage: make_mock_solid_dir.py
"""
#######################################################################
# Import modules that this module depends on
#######################################################################
#
import os
import sys
# Put ../share onto Python search path for modules
SHARE_DIR = os.path.abspath(
os.path.normpath(
os.path.join(os.path.dirname(sys.argv[0]),'..','share')))
sys.path.append(SHARE_DIR)
try:
from bcftbx.test.test_SolidData import TestUtils
except ImportError as ex:
print("Error importing modules: %s" % ex)
if __name__ == "__main__":
paired_end = False
if '--paired-end' in sys.argv:
paired_end = True
elif len(sys.argv) > 1:
print("Usage: %s [--paired-end]" % os.path.basename(sys.argv[0]))
sys.exit(1)
# Make mock solid directory
if paired_end:
solid_dir = TestUtils().make_solid_dir_paired_end('solid0123_20111014_PE_BC')
else:
solid_dir = TestUtils().make_solid_dir('solid0123_20111014_FRAG_BC')
print("Constructed mock dir: %s" % solid_dir)
| [
"[email protected]"
] | |
2cf88213019f14d023072bcdb66ce6c59f33d25d | 36a1a925ce413a5a7fb2f7532195a05a912582ff | /migrations/versions/ac3bccd67d33_.py | 9b49b96f5a0abcfce107a3b08538b2dc2c176b0e | [
"MIT"
] | permissive | jonathankamau/CP2-bucket-list-api | 3bd185162f7d73dc90dd19b9362ed7d5229d7b23 | 2e0fc959f9c1f2e3014a9ff13e46831a86454d24 | refs/heads/develop | 2021-01-21T20:23:00.072005 | 2017-01-21T12:25:06 | 2017-01-21T12:25:06 | 92,224,059 | 0 | 0 | MIT | 2020-07-19T23:47:49 | 2017-05-23T21:57:28 | Python | UTF-8 | Python | false | false | 2,100 | py | """empty message
Revision ID: ac3bccd67d33
Revises:
Create Date: 2017-01-11 15:55:18.005603
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ac3bccd67d33'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('Users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=20), nullable=True),
sa.Column('password_hash', sa.BINARY(length=60), nullable=True),
sa.Column('date_created', sa.DATETIME(), nullable=True),
sa.Column('token', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_Users_username'), 'Users', ['username'], unique=True)
op.create_table('Bucketlists',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=125), nullable=False),
sa.Column('date_created', sa.DATETIME(), nullable=True),
sa.Column('date_modified', sa.DATETIME(), nullable=True),
sa.Column('created_by', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['created_by'], ['Users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('BucketlistItems',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('description', sa.String(), nullable=True),
sa.Column('date_created', sa.DATETIME(), nullable=True),
sa.Column('date_modified', sa.DATETIME(), nullable=True),
sa.Column('bucketlist_id', sa.Integer(), nullable=True),
sa.Column('done', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['bucketlist_id'], ['Bucketlists.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('BucketlistItems')
op.drop_table('Bucketlists')
op.drop_index(op.f('ix_Users_username'), table_name='Users')
op.drop_table('Users')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
344314724faaff944a443750a3b50494c9c38b40 | 3849f74abb8dafef7bead636c057f1daa57322fe | /ProjectEuler/new.py | 0c7471b7a964df2bce53ee0bc569736b6573acb0 | [] | no_license | HaugenBits/CompProg | 45d322e68703e5f1a809f3221f5cf3e39327455b | cd3964fe75419b910fa1ccd1bbf925ec188003a0 | refs/heads/master | 2023-02-15T10:33:15.915271 | 2021-01-07T15:53:53 | 2021-01-07T15:53:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | def door_open(status):
kontroll_liste = list(status)
print(kontroll_liste)
resultat_liste = []
print(resultat_liste)
if kontroll_liste[8] != 'P' or kontroll_liste[3]==0: # sjekker at gir er P eller sjekker om hovedbryter er på
return resultat_liste # hvis ikke P eller hovedbryter av returnerer den tomme listen
else:
if kontroll_liste[2]== 0 : # hvis barnesikringen er av
if kontroll_liste[0]==1 or kontroll_liste[4] == 1 or kontroll_liste[5] == 1 : # Hvis bryter eller at håndtakene for venstre dør er i bruk.
resultat_liste.append("left")
print(resultat_liste[:])
elif kontroll_liste[1]==1 or kontroll_liste[6] == 1 or kontroll_liste[7] ==1 : # Hvis bryter eller at håndtakene for høyre dør er i bruk.
resultat_liste.append("rigth")
elif kontroll_liste[0]==1 or kontroll_liste[5]==1: # barnesikring på, sjekker venstre håndtak ute og bryter
resultat_liste.append("left")
print(resultat_liste)
elif kontroll_liste[1]==1 or kontroll_liste[7] ==1: # barnesikring på, sjekker høyre håndtak ute og bryter
resultat_liste.append("rigth")
elif kontroll_liste[0]==0 and kontroll_liste[1] == 0 and kontroll_liste[6]== 0 and kontroll_liste[7]==0 : # sjekker at ingen bryter og håndtak brukes
resultat_liste = []
return resultat_liste
print(door_open('00010100P'))
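
# A couple of illustrative checks (sketches; expected values follow from the
# string comparisons above, left commented out to keep the original output):
# print(door_open('10010100P'))  # left-door switch pressed -> ['left']
# print(door_open('00010100D'))  # gear not in P -> []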
| [
"[email protected]"
] | |
73d1f2793e1d0bef481611d80af78f725370823a | a4e18f63187a90a1699a4119f53fef0ba4d7ac39 | /frontpage/apps.py | c3872b6e1a86fb947efd8140b57fbf35ca56d104 | [] | no_license | Code-Institute-Submissions/lionkiller900-Ecommerce_website_JuneResub | 5e071a7ec5715b0d71ab04eea63d20254497e23e | 3a31dbc3b23b1075213073db42aac17dedd98007 | refs/heads/master | 2023-06-04T16:26:27.549116 | 2021-06-13T12:15:45 | 2021-06-13T12:15:45 | 377,456,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from django.apps import AppConfig
class HomeConfig(AppConfig):
name = 'frontpage'
| [
"[email protected]"
] | |
191671c9fb6640b6e84f4f70774511da1adad553 | 3c7b3948a33ff9c3478e90d9d531e2f25a02659f | /slackbot/run.py | 63a3add248963ccfb0a1da59e0a352e60edca0c3 | [] | no_license | h0uk1/RaspLock | 3b1834908a6af999e8525c6999e32096036d72e4 | 498a7b2caab004c3313f5cd17540e6e25e0fde9b | refs/heads/master | 2020-04-17T07:04:14.970052 | 2019-02-22T06:12:29 | 2019-02-22T06:12:29 | 166,352,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | # coding: utf-8
from slackbot.bot import Bot
def main():
bot = Bot()
bot.run()
if __name__ == "__main__":
print('start slackbot')
main() | [
"[email protected]"
] | |
11754e433ee8f5985f0ae11f9bae4e8dc50213e1 | 6e8f2e28479566dbaa338300b2d61f784ff83f97 | /.history/code/tensorboard_utils_20210411113117.py | 69315f5c5b16b26260ed37152698eb1eba53cc5e | [] | no_license | eeng5/CV-final-project | 55a7d736f75602858233ebc380c4e1d67ab2b866 | 580e28819560b86f6974959efb1d31ef138198fc | refs/heads/main | 2023-04-09T21:28:21.531293 | 2021-04-21T19:57:22 | 2021-04-21T19:57:22 | 352,703,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,424 | py | """
Project 4 - CNNs
CS1430 - Computer Vision
Brown University
"""
import io
import os
import re
import sklearn.metrics
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
import hyperparameters as hp
def plot_to_image(figure):
""" Converts a pyplot figure to an image tensor. """
buf = io.BytesIO()
plt.savefig(buf, format='png')
plt.close(figure)
buf.seek(0)
image = tf.image.decode_png(buf.getvalue(), channels=4)
image = tf.expand_dims(image, 0)
return image
class ImageLabelingLogger(tf.keras.callbacks.Callback):
""" Keras callback for logging a plot of test images and their
predicted labels for viewing in Tensorboard. """
def __init__(self, logs_path, datasets):
super(ImageLabelingLogger, self).__init__()
self.datasets = datasets
self.task = datasets.task
self.logs_path = logs_path
print("Done setting up image labeling logger.")
def on_epoch_end(self, epoch, logs=None):
self.log_image_labels(epoch, logs)
def log_image_labels(self, epoch_num, logs):
""" Writes a plot of test images and their predicted labels
to disk. """
fig = plt.figure(figsize=(9, 9))
count = 0
for batch in self.datasets.test_data: # changed from train to test
for i, image in enumerate(batch[0]):
plt.subplot(5, 5, count+1)
correct_class_idx = batch[1][i]
probabilities = self.model(np.array([image])).numpy()[0]
predict_class_idx = np.argmax(probabilities)
image = np.clip(image, 0., 1.)
plt.imshow(image, cmap='gray')
is_correct = correct_class_idx == predict_class_idx
title_color = 'g' if is_correct else 'r'
plt.title(
self.datasets.idx_to_class[predict_class_idx],
color=title_color)
plt.axis('off')
count += 1
if count == 25:
break
if count == 25:
break
figure_img = plot_to_image(fig)
file_writer_il = tf.summary.create_file_writer(
self.logs_path + os.sep + "image_labels")
with file_writer_il.as_default():
tf.summary.image("Image Label Predictions",
figure_img, step=epoch_num)
class ConfusionMatrixLogger(tf.keras.callbacks.Callback):
""" Keras callback for logging a confusion matrix for viewing
in Tensorboard. """
def __init__(self, logs_path, datasets):
super(ConfusionMatrixLogger, self).__init__()
self.datasets = datasets
self.logs_path = logs_path
def on_epoch_end(self, epoch, logs=None):
self.log_confusion_matrix(epoch, logs)
def log_confusion_matrix(self, epoch, logs):
""" Writes a confusion matrix plot to disk. """
test_pred = []
test_true = []
count = 0
for i in self.datasets.test_data:
test_pred.append(self.model.predict(i[0]))
test_true.append(i[1])
count += 1
if count >= 1500 / hp.batch_size:
break
test_pred = np.array(test_pred)
test_pred = np.argmax(test_pred, axis=-1).flatten()
test_true = np.array(test_true).flatten()
# Source: https://www.tensorflow.org/tensorboard/image_summaries
cm = sklearn.metrics.confusion_matrix(test_true, test_pred)
figure = self.plot_confusion_matrix(
cm, class_names=self.datasets.classes)
cm_image = plot_to_image(figure)
file_writer_cm = tf.summary.create_file_writer(
self.logs_path + os.sep + "confusion_matrix")
with file_writer_cm.as_default():
tf.summary.image(
"Confusion Matrix (on validation set)", cm_image, step=epoch)
def plot_confusion_matrix(self, cm, class_names):
""" Plots a confusion matrix returned by
sklearn.metrics.confusion_matrix(). """
# Source: https://www.tensorflow.org/tensorboard/image_summaries
figure = plt.figure(figsize=(8, 8))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Greens)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
cm = np.around(cm.astype('float') / cm.sum(axis=1)
[:, np.newaxis], decimals=2)
threshold = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, cm[i, j],
horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
class CustomModelSaver(tf.keras.callbacks.Callback):
""" Custom Keras callback for saving weights of networks. """
def __init__(self, checkpoint_dir, task, max_num_weights=5):
super(CustomModelSaver, self).__init__()
self.checkpoint_dir = checkpoint_dir
self.task = task
self.max_num_weights = max_num_weights
def on_epoch_end(self, epoch, logs=None):
""" At epoch end, weights are saved to checkpoint directory. """
min_acc_file, max_acc_file, max_acc, num_weights = \
self.scan_weight_files()
cur_acc = logs["val_sparse_categorical_accuracy"]
# Only save weights if test accuracy exceeds the previous best
# weight file
if cur_acc > max_acc:
save_name = "weights.e{0:03d}-acc{1:.4f}.h5".format(
epoch, cur_acc)
if self.task == '1':
self.model.save_weights(
self.checkpoint_dir + os.sep + "your." + save_name)
else:
# Only save weights of classification head of VGGModel
self.model.head.save_weights(
self.checkpoint_dir + os.sep + "vgg." + save_name)
# Ensure max_num_weights is not exceeded by removing
# minimum weight
if self.max_num_weights > 0 and \
num_weights + 1 > self.max_num_weights:
os.remove(self.checkpoint_dir + os.sep + min_acc_file)
def scan_weight_files(self):
""" Scans checkpoint directory to find current minimum and maximum
accuracy weights files as well as the number of weights. """
min_acc = float('inf')
max_acc = 0
min_acc_file = ""
max_acc_file = ""
num_weights = 0
files = os.listdir(self.checkpoint_dir)
for weight_file in files:
if weight_file.endswith(".h5"):
num_weights += 1
file_acc = float(re.findall(
r"[+-]?\d+\.\d+", weight_file.split("acc")[-1])[0])
if file_acc > max_acc:
max_acc = file_acc
max_acc_file = weight_file
if file_acc < min_acc:
min_acc = file_acc
min_acc_file = weight_file
return min_acc_file, max_acc_file, max_acc, num_weights
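
# Minimal wiring sketch (assumes `model`, `datasets`, `logs_path`, and
# `checkpoint_dir` are created elsewhere in the project, e.g. in run.py):
#
# callbacks = [
#     ImageLabelingLogger(logs_path, datasets),
#     ConfusionMatrixLogger(logs_path, datasets),
#     CustomModelSaver(checkpoint_dir, task='1', max_num_weights=5),
# ]
# model.fit(datasets.train_data, validation_data=datasets.test_data,
#           epochs=hp.num_epochs, callbacks=callbacks)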
| [
"[email protected]"
] | |
4bb26a5a9bf7e092ea415b2aa1d5f9635043ad18 | 75249b08e8988e632a069bb1d29e94cbab64bbbe | /src/controllers/PIDcontroller.py | 3d779ea690959a3557dc624bdbb921bc41d0ed5a | [] | no_license | beksultantuleev/Mambo_UAV_with_UWB | 1d3c584f2ed95141069a9993c69108c448b8574d | e9d8392a80f3ccfce54898fcc27275dfb6895b12 | refs/heads/master | 2023-04-27T11:54:53.066697 | 2021-05-12T22:17:39 | 2021-05-12T22:17:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,652 | py | from numpy.core.fromnumeric import size
import numpy as np
import time
class PIDcontroller:
def __init__(self):
self.current_state = [] # meters
self.desired_state = [] # meters #setpoint
# ________________
self.cmd_input = []
self.Kp = np.ones((1, 3))[0] * 7 # [7,7,7]
self.Ki =np.ones((1, 3))[0] * 0.2 # [0.2, 0.2, 0.2]
self.Kd =np.ones((1, 3))[0] * 0.25 # [0.25, 0.25, 0.25]
self.sample_time = 60
self.prev_error_values = [0, 0, 0]
self.max_values = 30
self.min_values = -30
self.error = [0, 0, 0]
self.errsum = [0, 0, 0]
self.last_time = 0.0001
self.derr = [0, 0, 0]
def set_desired_state(self, desired_state):
self.desired_state = desired_state
def set_current_state(self, current_state):
self.current_state = current_state
def calculate_cmd_input(self):
self.pid(self.desired_state)
# return self.cmd_input
return self.adjusted_cmd
def pid(self, desired_state):
self.desired_state = desired_state
self.error = list(np.subtract(self.current_state, self.desired_state))
# print(f"self erros is >>{self.error}")
self.now = int(round(time.time() * 1000))
self.timechange = self.now - self.last_time
if (self.timechange > self.sample_time):
# print(self.sample_time)
if (self.last_time != 0):
# Integration for Ki
                # guard: cap dt after a long gap so the integral term cannot jump
                if self.timechange > 1000:
                    self.timechange = 100
self.errsum = list(np.array(self.errsum) +np.array(self.error)* self.timechange/100)
# print(f"errorsum is {self.errsum}")
# print(f'error is >{self.error} and timechange is> {self.timechange} errsum is >{self.errsum}')
# Derivation for Kd
self.derr = list((np.array(self.error)- np.array(self.prev_error_values))/self.timechange)
# print(f"derivatives are {self.derr[1]}")
                # Calculating the output for each axis (clamped to the +/-30 limits below)
self.Pitch =-(self.Kp[0]*self.error[0]) - \
(self.Kd[0]*self.derr[0]) -(self.errsum[0]*self.Ki[0])
# print((self.errsum[0]*self.Ki[0]))
# print(f'errorsum is >>{self.errsum} and Ki is >>{self.Ki}, product is {self.errsum[0]*self.Ki[0]}')
self.Roll =-(self.Kp[1]*self.error[1]) + \
(self.Kd[1]*self.derr[1]) -(self.errsum[1]*self.Ki[1])
self.Throttle = -(self.Kp[2]*self.error[2]) + \
(self.Kd[2]*self.derr[2])-(self.errsum[2]*self.Ki[2])
# Checking min and max threshold and updating on true
# Throttle Conditions
if self.Throttle > self.max_values:
self.Throttle = self.max_values
if self.Throttle < self.min_values:
self.Throttle = self.min_values
# Pitch Conditions
if self.Pitch > self.max_values:
self.Pitch = self.max_values
if self.Pitch < self.min_values:
self.Pitch = self.min_values
# Roll Conditions
if self.Roll > self.max_values:
self.Roll = self.max_values
if self.Roll < self.min_values:
self.Roll = self.min_values
# Publishing values on topic 'drone command'
self.cmd_input = [self.Roll,
self.Pitch, 0, self.Throttle]
self.adjusted_cmd = []
for i in self.cmd_input:
if i>0:
if abs(i) < 0.1:
i = 0
elif (abs(i) >= 0.1 and abs(i) <= 5):
i = 5
self.adjusted_cmd.append(i)
else:
if abs(i) < 0.1:
i = 0
elif (abs(i) >= 0.1 and abs(i) <= 5):
i = -5
self.adjusted_cmd.append(i)
# print(self.adjusted_cmd)
# Updating prev values for all axis
self.prev_error_values = self.error
self.last_time = self.now
# return self.get_current_input(), self.error
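
# The update in pid() is a standard discrete PID law applied per axis
# (a sketch, using the sign conventions chosen above):
#
#   u[k] = -Kp*e[k] - Ki*sum_j(e[j]*dt) -/+ Kd*(e[k] - e[k-1])/dt
#
# where e = current_state - desired_state and dt is `timechange`. The result
# is clamped to [min_values, max_values], and small magnitudes are snapped to
# 0 or +/-5 in adjusted_cmd before being sent to the drone.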
if __name__ == "__main__":
mambo = PIDcontroller()
# ====================
# mambo.set_current_state([0, 0, 0])
# mambo.set_desired_state([1, 0, 0])
# u = mambo.calculate_cmd_input()
# print(u)
# ===================
destX = -10
num = 0
mambo.set_desired_state([destX, 0, 0])
while num >destX:
mambo.set_current_state([num,0,0])
u = mambo.calculate_cmd_input()
num -=0.5
time.sleep(0.1)
print(f"{u} at position>> {num}")
# ========================================
# destY = 3
# num = 0
# mambo.set_desired_state([0, destY, 0])
# while num <destY:
# mambo.set_current_state([0,num,0])
# u = mambo.calculate_cmd_input()
# num +=0.5
# time.sleep(0.1)
# print(f"{u} at position>> {num}")
# ========================================
# destZ = 3
# num = 0
# mambo.set_desired_state([0, 0, destZ])
# while num <destZ:
# mambo.set_current_state([0,0,num])
# u = mambo.calculate_cmd_input()
# num +=0.5
# time.sleep(0.1)
# print(f"{u} at position>> {num}")
| [
"[email protected]"
] | |
cce9c2c02347ccae443d5f1e8dbebf712c264d0e | 73e277935ef28fd05935c93a3f155c9cc6dc6de7 | /ctf/crypto/quad_residue/Cipolla.py | e07aed34561ff03170436108e72f4b49b2beca9e | [] | no_license | ohmygodlin/snippet | 5ffe6b8fec99abd67dd5d7f819520e28112eae4b | 21d02015492fb441b2ad93b4a455dc4a145f9913 | refs/heads/master | 2023-01-08T14:59:38.618791 | 2022-12-28T11:23:23 | 2022-12-28T11:23:23 | 190,989,347 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | #python3
#https://learnblockchain.cn/article/1520
def square_root_of_quadratic_residue(n, modulo):
"""Square root of quadratic residue
Solve the square root of quadratic residue using Cipolla's algorithm with Legendre symbol
Returns:
int -- if n is a quadratic residue,
return x, such that x^{2} = n (mod modulo)
otherwise, return -1
"""
if modulo == 2:
return 1
if n % modulo == 0:
return 0
Legendre = lambda n: pow(n, modulo - 1 >> 1, modulo)
if Legendre(n) == modulo - 1:
return -1
t = 0
while Legendre(t ** 2 - n) != modulo - 1:
t += 1
w = (t ** 2 - n) % modulo
return (generate_quadratic_field(w, modulo)(t, 1) ** (modulo + 1 >> 1)).x
def generate_quadratic_field(d, modulo=0):
"""Generate quadratic field number class
Returns:
class -- quadratic field number class
"""
assert(isinstance(modulo, int) and modulo >= 0)
class QuadraticFieldNumber:
def __init__(self, x, y):
self.x = x % modulo
self.y = y % modulo
def __mul__(self, another):
x = self.x * another.x + d * self.y * another.y
y = self.x * another.y + self.y * another.x
return self.__class__(x, y)
def __pow__(self, exponent):
result = self.__class__(1, 0)
if exponent:
temporary = self.__class__(self.x, self.y)
while exponent:
if exponent & 1:
result *= temporary
temporary *= temporary
exponent >>= 1
return result
def __str__(self):
return '({}, {} \\sqrt({}))'.format(self.x, self.y, d)
return QuadraticFieldNumber
a = 8479994658316772151941616510097127087554541274812435112009425778595495359700244470400642403747058566807127814165396640215844192327900454116257979487432016769329970767046735091249898678088061634796559556704959846424131820416048436501387617211770124292793308079214153179977624440438616958575058361193975686620046439877308339989295604537867493683872778843921771307305602776398786978353866231661453376056771972069776398999013769588936194859344941268223184197231368887060609212875507518936172060702209557124430477137421847130682601666968691651447236917018634902407704797328509461854842432015009878011354022108661461024768
p = 30531851861994333252675935111487950694414332763909083514133769861350960895076504687261369815735742549428789138300843082086550059082835141454526618160634109969195486322015775943030060449557090064811940139431735209185996454739163555910726493597222646855506445602953689527405362207926990442391705014604777038685880527537489845359101552442292804398472642356609304810680731556542002301547846635101455995732584071355903010856718680732337369128498655255277003643669031694516851390505923416710601212618443109844041514942401969629158975457079026906304328749039997262960301209158175920051890620947063936347307238412281568760161
x = square_root_of_quadratic_residue(a, p)
print(x)
print(pow(x,2,p) - a)
# x^2 = (p-x)^2 = a (mod p), so x and p - x are the two square roots
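# Sanity check (sketch): the mirrored root should give a zero difference too.
print(pow(p - x, 2, p) - a)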
"[email protected]"
] | |
0eab1d6d33153e049b27a9a7787fb2e08c582bde | 036c70c9fb00800d804378a38edcf2fcad5c65c7 | /example/app.py | cfc989ac0e6da992feddb6fadb3527e430ea0b20 | [
"MIT"
] | permissive | vuonglv1612/apm-agent-utils | 71136b047e793ebd13b3a3bcf49eb3bff916de75 | ea804cac6bfd23096b4658952e58315058146e9d | refs/heads/master | 2023-06-05T11:20:30.624086 | 2021-06-21T04:14:07 | 2021-06-21T04:14:07 | 336,338,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | import logging
import elasticapm
from apm_agent_utils.utils import add_instrumentation
from elasticapm.contrib.flask import ElasticAPM
from flask import Flask
from example.config import secret_token, server_url, service_name
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
es = ElasticAPM()
def create_app():
add_instrumentation("example.instrumentations.Test")
elasticapm.instrument()
app = Flask(__name__)
es.init_app(
app,
server_url=server_url,
service_name=service_name,
secret_token=secret_token,
debug=True,
)
return app
app = create_app()
| [
"[email protected]"
] | |
c62e6620434762221100e759917e7b1c439e7895 | 41dd4c10786cf0057c4494fe7991d1ea9b39a462 | /practicals/exp4/search_element_in_array.py | d5651b8732eb03c0b746f81a0c8a8432115ccc9c | [] | no_license | ItsMeVArun5/PythonPracticals | baea04e2543c998a34f0ec4bfe99a320c31cc80a | e63b0ab4fc195a2d5246423613d8ed14315c6788 | refs/heads/master | 2020-12-27T07:52:43.520907 | 2020-03-31T18:36:08 | 2020-03-31T18:36:08 | 237,821,645 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # Write a Python Program to search an element in array and display its location (Create Definition searchInArray(element,array).
def searchArray(element, array):
pos = 0
index = 0
found = False
for x in array:
index += 1
if element == x:
found = True
pos = index
    if found:
        print("location found and its position is:", pos)
    else:
        print("Not Found")
array = [1,4,5,3,25]
element = int(input("Enter the element to search: "))
searchArray(element, array)
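
# An equivalent sketch using enumerate (assuming the same 1-based "location"
# convention as searchArray above); returns -1 when the element is absent:
def search_with_enumerate(element, array):
    for position, value in enumerate(array, start=1):
        if value == element:
            return position
    return -1

# print(search_with_enumerate(5, array))  # -> 3 for array = [1, 4, 5, 3, 25]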
| [
"[email protected]"
] | |
ea807fde95aceafd55268cd9b3b8487b9dc91e09 | f5780a6d71ec4def28757a4f90f00ca6a0062ccf | /Tournament/capture.py | 5d3c4508e0ef1bbefd971ee8e86bbfcdba080f2d | [] | no_license | sqelliott/cs140-Tournament | 031602408f6facb27f71ffb5405cb494a2703724 | 3a6729dc89b8c5e1c084d1335d57c5e885319ace | refs/heads/master | 2021-01-19T19:59:34.586040 | 2017-03-12T02:24:45 | 2017-03-12T02:24:45 | 83,355,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,542 | py | # capture.py
# ----------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero ([email protected]) and Dan Klein ([email protected]).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
Capture.py holds the logic for Pacman capture the flag.
(i) Your interface to the pacman world:
Pacman is a complex environment. You probably don't want to
read through all of the code we wrote to make the game runs
correctly. This section contains the parts of the code
that you will need to understand in order to complete the
project. There is also some code in game.py that you should
understand.
(ii) The hidden secrets of pacman:
This section contains all of the logic code that the pacman
environment uses to decide who can move where, who dies when
things collide, etc. You shouldn't need to read this section
of code, but you can if you want.
(iii) Framework to start a game:
The final section contains the code for reading the command
you use to set up the game, then starting up a new game, along with
linking in all the external parts (agent functions, graphics).
Check this section out to see all the options available to you.
To play your first game, type 'python capture.py' from the command line.
The keys are
P1: 'a', 's', 'd', and 'w' to move
P2: 'l', ';', ',' and 'p' to move
"""
from game import GameStateData
from game import Game
from game import Directions
from game import Actions
from util import nearestPoint
from util import manhattanDistance
from game import Grid
from game import Configuration
from game import Agent
from game import reconstituteGrid
import sys, util, types, time, random
import keyboardAgents
# If you change these, you won't affect the server, so you can't cheat
KILL_POINTS = 0
SONAR_NOISE_RANGE = 13 # Must be odd
SONAR_NOISE_VALUES = [i - (SONAR_NOISE_RANGE - 1)/2 for i in range(SONAR_NOISE_RANGE)]
SIGHT_RANGE = 5 # Manhattan distance
MIN_FOOD = 2
SCARED_TIME = 40
def noisyDistance(pos1, pos2):
return int(util.manhattanDistance(pos1, pos2) + random.choice(SONAR_NOISE_VALUES))
###################################################
# YOUR INTERFACE TO THE PACMAN WORLD: A GameState #
###################################################
class GameState:
"""
A GameState specifies the full game state, including the food, capsules,
agent configurations and score changes.
GameStates are used by the Game object to capture the actual state of the game and
can be used by agents to reason about the game.
Much of the information in a GameState is stored in a GameStateData object. We
strongly suggest that you access that data via the accessor methods below rather
than referring to the GameStateData object directly.
"""
####################################################
# Accessor methods: use these to access state data #
####################################################
def getLegalActions( self, agentIndex=0 ):
"""
Returns the legal actions for the agent specified.
"""
return AgentRules.getLegalActions( self, agentIndex )
def generateSuccessor( self, agentIndex, action):
"""
Returns the successor state (a GameState object) after the specified agent takes the action.
"""
# Copy current state
state = GameState(self)
# Find appropriate rules for the agent
AgentRules.applyAction( state, action, agentIndex )
AgentRules.checkDeath(state, agentIndex)
AgentRules.decrementTimer(state.data.agentStates[agentIndex])
# Book keeping
state.data._agentMoved = agentIndex
state.data.score += state.data.scoreChange
state.data.timeleft = self.data.timeleft - 1
return state
def getAgentState(self, index):
return self.data.agentStates[index]
def getAgentPosition(self, index):
"""
Returns a location tuple if the agent with the given index is observable;
if the agent is unobservable, returns None.
"""
agentState = self.data.agentStates[index]
ret = agentState.getPosition()
if ret:
return tuple(int(x) for x in ret)
return ret
def getNumAgents( self ):
return len( self.data.agentStates )
def getScore( self ):
"""
Returns a number corresponding to the current score.
"""
return self.data.score
def getRedFood(self):
"""
Returns a matrix of food that corresponds to the food on the red team's side.
For the matrix m, m[x][y]=true if there is food in (x,y) that belongs to
red (meaning red is protecting it, blue is trying to eat it).
"""
return halfGrid(self.data.food, red = True)
def getBlueFood(self):
"""
Returns a matrix of food that corresponds to the food on the blue team's side.
For the matrix m, m[x][y]=true if there is food in (x,y) that belongs to
blue (meaning blue is protecting it, red is trying to eat it).
"""
return halfGrid(self.data.food, red = False)
def getRedCapsules(self):
return halfList(self.data.capsules, self.data.food, red = True)
def getBlueCapsules(self):
return halfList(self.data.capsules, self.data.food, red = False)
def getWalls(self):
"""
Just like getFood but for walls
"""
return self.data.layout.walls
def hasFood(self, x, y):
"""
Returns true if the location (x,y) has food, regardless of
whether it's blue team food or red team food.
"""
return self.data.food[x][y]
def hasWall(self, x, y):
"""
Returns true if (x,y) has a wall, false otherwise.
"""
return self.data.layout.walls[x][y]
def isOver( self ):
return self.data._win
def getRedTeamIndices(self):
"""
Returns a list of agent index numbers for the agents on the red team.
"""
return self.redTeam[:]
def getBlueTeamIndices(self):
"""
Returns a list of the agent index numbers for the agents on the blue team.
"""
return self.blueTeam[:]
def isOnRedTeam(self, agentIndex):
"""
Returns true if the agent with the given agentIndex is on the red team.
"""
return self.teams[agentIndex]
def getAgentDistances(self):
"""
Returns a noisy distance to each agent.
"""
        if 'agentDistances' in dir(self):
return self.agentDistances
else:
return None
def getDistanceProb(self, trueDistance, noisyDistance):
"Returns the probability of a noisy distance given the true distance"
if noisyDistance - trueDistance in SONAR_NOISE_VALUES:
return 1.0/SONAR_NOISE_RANGE
else:
return 0
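    # Worked example (a sketch; SONAR_NOISE_RANGE is the odd constant defined
    # near the top of this file, so the noise band is symmetric around 0):
    #   getDistanceProb(d, d + 1)   -> 1.0 / SONAR_NOISE_RANGE  (inside the band)
    #   getDistanceProb(d, d + 100) -> 0                        (outside the band)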
def getInitialAgentPosition(self, agentIndex):
"Returns the initial position of an agent."
return self.data.layout.agentPositions[agentIndex][1]
def getCapsules(self):
"""
Returns a list of positions (x,y) of the remaining capsules.
"""
return self.data.capsules
#############################################
# Helper methods: #
# You shouldn't need to call these directly #
#############################################
def __init__( self, prevState = None ):
"""
Generates a new state by copying information from its predecessor.
"""
        if prevState is not None: # Copying info from a predecessor state
self.data = GameStateData(prevState.data)
self.blueTeam = prevState.blueTeam
self.redTeam = prevState.redTeam
self.data.timeleft = prevState.data.timeleft
self.teams = prevState.teams
self.agentDistances = prevState.agentDistances
        else: # Initial state
self.data = GameStateData()
self.agentDistances = []
def deepCopy( self ):
state = GameState( self )
state.data = self.data.deepCopy()
state.data.timeleft = self.data.timeleft
state.blueTeam = self.blueTeam[:]
state.redTeam = self.redTeam[:]
state.teams = self.teams[:]
state.agentDistances = self.agentDistances[:]
return state
def makeObservation(self, index):
state = self.deepCopy()
# Adds the sonar signal
pos = state.getAgentPosition(index)
n = state.getNumAgents()
distances = [noisyDistance(pos, state.getAgentPosition(i)) for i in range(n)]
state.agentDistances = distances
# Remove states of distant opponents
if index in self.blueTeam:
team = self.blueTeam
otherTeam = self.redTeam
else:
otherTeam = self.blueTeam
team = self.redTeam
for enemy in otherTeam:
seen = False
enemyPos = state.getAgentPosition(enemy)
for teammate in team:
if util.manhattanDistance(enemyPos, state.getAgentPosition(teammate)) <= SIGHT_RANGE:
seen = True
if not seen: state.data.agentStates[enemy].configuration = None
return state
def __eq__( self, other ):
"""
Allows two states to be compared.
"""
        if other is None: return False
return self.data == other.data
def __hash__( self ):
"""
Allows states to be keys of dictionaries.
"""
return int(hash( self.data ))
def __str__( self ):
return str(self.data)
def initialize( self, layout, numAgents):
"""
Creates an initial game state from a layout array (see layout.py).
"""
self.data.initialize(layout, numAgents)
positions = [a.configuration for a in self.data.agentStates]
self.blueTeam = [i for i,p in enumerate(positions) if not self.isRed(p)]
self.redTeam = [i for i,p in enumerate(positions) if self.isRed(p)]
self.teams = [self.isRed(p) for p in positions]
def isRed(self, configOrPos):
width = self.data.layout.width
        if isinstance(configOrPos, tuple):
            return configOrPos[0] < width / 2
        else:
            return configOrPos.pos[0] < width / 2
def halfGrid(grid, red):
halfway = grid.width / 2
halfgrid = Grid(grid.width, grid.height, False)
    if red: cols = range(halfway)  # renamed from xrange to avoid shadowing the Python 2 builtin
    else: cols = range(halfway, grid.width)
    for y in range(grid.height):
        for x in cols:
if grid[x][y]: halfgrid[x][y] = True
return halfgrid
def halfList(l, grid, red):
halfway = grid.width / 2
newList = []
for x,y in l:
if red and x <= halfway: newList.append((x,y))
elif not red and x > halfway: newList.append((x,y))
return newList
############################################################################
# THE HIDDEN SECRETS OF PACMAN #
# #
# You shouldn't need to look through the code in this section of the file. #
############################################################################
COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill
class CaptureRules:
"""
These game rules manage the control flow of a game, deciding when
and how the game starts and ends.
"""
def __init__(self, quiet = False):
self.quiet = quiet
def newGame( self, layout, agents, display, length, muteAgents, catchExceptions ):
initState = GameState()
initState.initialize( layout, len(agents) )
starter = random.randint(0,1)
print('%s team starts' % ['Red', 'Blue'][starter])
game = Game(agents, display, self, startingIndex=starter, muteAgents=muteAgents, catchExceptions=catchExceptions)
game.state = initState
game.length = length
game.state.data.timeleft = length
if 'drawCenterLine' in dir(display):
display.drawCenterLine()
self._initBlueFood = initState.getBlueFood().count()
self._initRedFood = initState.getRedFood().count()
return game
def process(self, state, game):
"""
Checks to see whether it is time to end the game.
"""
if 'moveHistory' in dir(game):
if len(game.moveHistory) == game.length:
state.data._win = True
if state.isOver():
game.gameOver = True
if not game.rules.quiet:
if state.getRedFood().count() == MIN_FOOD:
print 'The Blue team has captured all but %d of the opponents\' dots.' % MIN_FOOD
if state.getBlueFood().count() == MIN_FOOD:
print 'The Red team has captured all but %d of the opponents\' dots.' % MIN_FOOD
if state.getBlueFood().count() > MIN_FOOD and state.getRedFood().count() > MIN_FOOD:
print 'Time is up.'
if state.data.score == 0: print 'Tie game!'
else:
winner = 'Red'
if state.data.score < 0: winner = 'Blue'
print 'The %s team wins by %d points.' % (winner, abs(state.data.score))
def getProgress(self, game):
blue = 1.0 - (game.state.getBlueFood().count() / float(self._initBlueFood))
red = 1.0 - (game.state.getRedFood().count() / float(self._initRedFood))
        moves = len(game.moveHistory) / float(game.length)  # moveHistory lives on the Game object, not the rules
# return the most likely progress indicator, clamped to [0, 1]
return min(max(0.75 * max(red, blue) + 0.25 * moves, 0.0), 1.0)
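        # Worked example for the line above (a sketch): red=0.5, blue=0.2,
        # moves=0.4 gives 0.75*0.5 + 0.25*0.4 = 0.475.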
def agentCrash(self, game, agentIndex):
if agentIndex % 2 == 0:
print "Red agent crashed"
game.state.data.score = -1
else:
print "Blue agent crashed"
game.state.data.score = 1
def getMaxTotalTime(self, agentIndex):
return 900 # Move limits should prevent this from ever happening
def getMaxStartupTime(self, agentIndex):
return 15 # 15 seconds for registerInitialState
def getMoveWarningTime(self, agentIndex):
return 1 # One second per move
def getMoveTimeout(self, agentIndex):
return 3 # Three seconds results in instant forfeit
def getMaxTimeWarnings(self, agentIndex):
return 2 # Third violation loses the game
class AgentRules:
"""
These functions govern how each agent interacts with her environment.
"""
def getLegalActions( state, agentIndex ):
"""
Returns a list of legal actions (which are both possible & allowed)
"""
agentState = state.getAgentState(agentIndex)
conf = agentState.configuration
possibleActions = Actions.getPossibleActions( conf, state.data.layout.walls )
return AgentRules.filterForAllowedActions( agentState, possibleActions)
getLegalActions = staticmethod( getLegalActions )
def filterForAllowedActions(agentState, possibleActions):
return possibleActions
filterForAllowedActions = staticmethod( filterForAllowedActions )
def applyAction( state, action, agentIndex ):
"""
Edits the state to reflect the results of the action.
"""
legal = AgentRules.getLegalActions( state, agentIndex )
if action not in legal:
raise Exception("Illegal action " + str(action))
# Update Configuration
agentState = state.data.agentStates[agentIndex]
speed = 1.0
# if agentState.isPacman: speed = 0.5
vector = Actions.directionToVector( action, speed )
oldConfig = agentState.configuration
agentState.configuration = oldConfig.generateSuccessor( vector )
# Eat
next = agentState.configuration.getPosition()
nearest = nearestPoint( next )
        if agentState.isPacman and manhattanDistance( nearest, next ) <= 0.9:
AgentRules.consume( nearest, state, state.isOnRedTeam(agentIndex) )
# Change agent type
if next == nearest:
agentState.isPacman = [state.isOnRedTeam(agentIndex), state.isRed(agentState.configuration)].count(True) == 1
applyAction = staticmethod( applyAction )
def consume( position, state, isRed ):
x,y = position
# Eat food
if state.data.food[x][y]:
score = -1
if isRed: score = 1
state.data.scoreChange += score
state.data.food = state.data.food.copy()
state.data.food[x][y] = False
state.data._foodEaten = position
if (isRed and state.getBlueFood().count() == MIN_FOOD) or (not isRed and state.getRedFood().count() == MIN_FOOD):
state.data._win = True
# Eat capsule
if isRed: myCapsules = state.getBlueCapsules()
else: myCapsules = state.getRedCapsules()
if( position in myCapsules ):
state.data.capsules.remove( position )
state.data._capsuleEaten = position
# Reset all ghosts' scared timers
if isRed: otherTeam = state.getBlueTeamIndices()
else: otherTeam = state.getRedTeamIndices()
for index in otherTeam:
state.data.agentStates[index].scaredTimer = SCARED_TIME
consume = staticmethod( consume )
def decrementTimer(state):
timer = state.scaredTimer
if timer == 1:
state.configuration.pos = nearestPoint( state.configuration.pos )
state.scaredTimer = max( 0, timer - 1 )
decrementTimer = staticmethod( decrementTimer )
def checkDeath( state, agentIndex):
agentState = state.data.agentStates[agentIndex]
if state.isOnRedTeam(agentIndex):
otherTeam = state.getBlueTeamIndices()
else:
otherTeam = state.getRedTeamIndices()
if agentState.isPacman:
for index in otherTeam:
otherAgentState = state.data.agentStates[index]
if otherAgentState.isPacman: continue
ghostPosition = otherAgentState.getPosition()
if ghostPosition == None: continue
if manhattanDistance( ghostPosition, agentState.getPosition() ) <= COLLISION_TOLERANCE:
#award points to the other team for killing Pacmen
if otherAgentState.scaredTimer <= 0:
score = KILL_POINTS
if state.isOnRedTeam(agentIndex):
score = -score
state.data.scoreChange += score
agentState.isPacman = False
agentState.configuration = agentState.start
agentState.scaredTimer = 0
else:
score = KILL_POINTS
if state.isOnRedTeam(agentIndex):
score = -score
state.data.scoreChange += score
otherAgentState.isPacman = False
otherAgentState.configuration = otherAgentState.start
otherAgentState.scaredTimer = 0
else: # Agent is a ghost
for index in otherTeam:
otherAgentState = state.data.agentStates[index]
if not otherAgentState.isPacman: continue
pacPos = otherAgentState.getPosition()
if pacPos == None: continue
if manhattanDistance( pacPos, agentState.getPosition() ) <= COLLISION_TOLERANCE:
#award points to the other team for killing Pacmen
if agentState.scaredTimer <= 0:
score = KILL_POINTS
if not state.isOnRedTeam(agentIndex):
score = -score
state.data.scoreChange += score
otherAgentState.isPacman = False
otherAgentState.configuration = otherAgentState.start
otherAgentState.scaredTimer = 0
else:
score = KILL_POINTS
if state.isOnRedTeam(agentIndex):
score = -score
state.data.scoreChange += score
agentState.isPacman = False
agentState.configuration = agentState.start
agentState.scaredTimer = 0
checkDeath = staticmethod( checkDeath )
def placeGhost(state, ghostState):
ghostState.configuration = ghostState.start
placeGhost = staticmethod( placeGhost )
#############################
# FRAMEWORK TO START A GAME #
#############################
def default(str):
return str + ' [Default: %default]'
def parseAgentArgs(str):
if str == None or str == '': return {}
pieces = str.split(',')
opts = {}
for p in pieces:
if '=' in p:
key, val = p.split('=')
else:
key,val = p, 1
opts[key] = val
return opts
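# Example: parseAgentArgs("first=keys,numTraining=5,debug")
#   -> {'first': 'keys', 'numTraining': '5', 'debug': 1}
# (split values stay strings; a bare key defaults to the integer 1)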
def readCommand( argv ):
"""
Processes the command used to run pacman from the command line.
"""
from optparse import OptionParser
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python capture.py
- starts a game with two baseline agents
(2) python capture.py --keys0
- starts a two-player interactive game where the arrow keys control agent 0, and all other agents are baseline agents
(3) python capture.py -r baselineTeam -b myTeam
- starts a fully automated game where the red team is a baseline team and blue team is myTeam
"""
parser = OptionParser(usageStr)
parser.add_option('-r', '--red', help=default('Red team'),
default='baselineTeam')
parser.add_option('-b', '--blue', help=default('Blue team'),
default='baselineTeam')
parser.add_option('--redOpts', help=default('Options for red team (e.g. first=keys)'),
default='')
parser.add_option('--blueOpts', help=default('Options for blue team (e.g. first=keys)'),
default='')
parser.add_option('--keys0', help='Make agent 0 (first red player) a keyboard agent', action='store_true',default=False)
parser.add_option('--keys1', help='Make agent 1 (second red player) a keyboard agent', action='store_true',default=False)
parser.add_option('--keys2', help='Make agent 2 (first blue player) a keyboard agent', action='store_true',default=False)
parser.add_option('--keys3', help='Make agent 3 (second blue player) a keyboard agent', action='store_true',default=False)
parser.add_option('-l', '--layout', dest='layout',
help=default('the LAYOUT_FILE from which to load the map layout; use RANDOM for a random maze; use RANDOM<seed> to use a specified random seed, e.g., RANDOM23'),
metavar='LAYOUT_FILE', default='defaultCapture')
parser.add_option('--frameTime', type='int')
parser.add_option('-t', '--textgraphics', action='store_true', dest='textgraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quiet', action='store_true',
help='Display minimal output and no graphics', default=False)
parser.add_option('-Q', '--super-quiet', action='store_true', dest="super_quiet",
help='Same as -q but agent output is also suppressed', default=False)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom in the graphics'), default=1)
parser.add_option('-i', '--time', type='int', dest='time',
help=default('TIME limit of a game in moves'), default=1200, metavar='TIME')
parser.add_option('-n', '--numGames', type='int',
help=default('Number of games to play'), default=1)
parser.add_option('-f', '--fixRandomSeed', action='store_true',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('--record', action='store_true',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', default=None,
help='Replays a recorded game file.')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('-c', '--catchExceptions', action='store_true', default=False,
help='Catch exceptions and enforce time limits')
options, otherjunk = parser.parse_args(argv)
assert len(otherjunk) == 0, "Unrecognized options: " + str(otherjunk)
args = dict()
# Choose a display format
#if options.pygame:
# import pygameDisplay
# args['display'] = pygameDisplay.PacmanGraphics()
if options.textgraphics:
import textDisplay
args['display'] = textDisplay.PacmanGraphics()
elif options.quiet:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.super_quiet:
import textDisplay
args['display'] = textDisplay.NullGraphics()
args['muteAgents'] = True
else:
import captureGraphicsDisplay
# Hack for agents writing to the display
captureGraphicsDisplay.FRAME_TIME = 0
args['display'] = captureGraphicsDisplay.PacmanGraphics(options.red, options.blue, options.zoom, 0, capture=True)
import __main__
__main__.__dict__['_display'] = args['display']
args['redTeamName'] = options.red
args['blueTeamName'] = options.blue
if options.fixRandomSeed: random.seed('cs188')
# Special case: recorded games don't use the runGames method or args structure
if options.replay != None:
print 'Replaying recorded game %s.' % options.replay
import cPickle
recorded = cPickle.load(open(options.replay))
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
# Choose a pacman agent
redArgs, blueArgs = parseAgentArgs(options.redOpts), parseAgentArgs(options.blueOpts)
if options.numTraining > 0:
redArgs['numTraining'] = options.numTraining
blueArgs['numTraining'] = options.numTraining
nokeyboard = options.textgraphics or options.quiet or options.numTraining > 0
print '\nRed team %s with %s:' % (options.red, redArgs)
redAgents = loadAgents(True, options.red, nokeyboard, redArgs)
print '\nBlue team %s with %s:' % (options.blue, blueArgs)
blueAgents = loadAgents(False, options.blue, nokeyboard, blueArgs)
args['agents'] = sum([list(el) for el in zip(redAgents, blueAgents)],[]) # list of agents
numKeyboardAgents = 0
for index, val in enumerate([options.keys0, options.keys1, options.keys2, options.keys3]):
if not val: continue
if numKeyboardAgents == 0:
agent = keyboardAgents.KeyboardAgent(index)
elif numKeyboardAgents == 1:
agent = keyboardAgents.KeyboardAgent2(index)
else:
raise Exception('Max of two keyboard agents supported')
numKeyboardAgents += 1
args['agents'][index] = agent
# Choose a layout
import layout
if options.layout.startswith('RANDOM'):
args['layout'] = layout.Layout(randomLayout(int(options.layout[6:])).split('\n'))
elif options.layout.lower().find('capture') == -1:
raise Exception( 'You must use a capture layout with capture.py')
else:
args['layout'] = layout.getLayout( options.layout )
if args['layout'] == None: raise Exception("The layout " + options.layout + " cannot be found")
args['length'] = options.time
args['numGames'] = options.numGames
args['numTraining'] = options.numTraining
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
return args
def randomLayout(seed = None):
if not seed:
seed = random.randint(0,99999999)
# layout = 'layouts/random%08dCapture.lay' % seed
# print 'Generating random layout in %s' % layout
import mazeGenerator
return mazeGenerator.generateMaze(seed)
import traceback
def loadAgents(isRed, factory, textgraphics, cmdLineArgs):
"Calls agent factories and returns lists of agents"
try:
module = __import__(factory)
except ImportError:
print 'Error: The team "' + factory + '" could not be loaded! '
traceback.print_exc()
return [None for i in range(2)]
args = dict()
args.update(cmdLineArgs) # Add command line args with priority
print "Loading Team:", factory
print "Arguments:", args
# if textgraphics and factoryClassName.startswith('Keyboard'):
# raise Exception('Using the keyboard requires graphics (no text display, quiet or training games)')
try:
createTeamFunc = getattr(module, 'createTeam')
except AttributeError:
print 'Error: The team "' + factory + '" could not be loaded! '
traceback.print_exc()
return [None for i in range(2)]
indexAddend = 0
if not isRed:
indexAddend = 1
indices = [2*i + indexAddend for i in range(2)]
return createTeamFunc(indices[0], indices[1], isRed, **args)
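# Index layout: red teams get the even agent slots [0, 2] and blue teams the
# odd slots [1, 3], matching the red/blue interleaving built in readCommand.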
def replayGame( layout, agents, actions, display, length, redTeamName, blueTeamName ):
rules = CaptureRules()
game = rules.newGame( layout, agents, display, length, False, False )
state = game.state
display.redTeam = redTeamName
display.blueTeam = blueTeamName
display.initialize(state.data)
for action in actions:
# Execute the action
state = state.generateSuccessor( *action )
# Change the display
display.update( state.data )
# Allow for game specific conditions (winning, losing, etc.)
rules.process(state, game)
display.finish()
def runGames( layout, agents, display, length, numGames, record, numTraining, redTeamName, blueTeamName, muteAgents=False, catchExceptions=False ):
rules = CaptureRules()
games = []
if numTraining > 0:
print 'Playing %d training games' % numTraining
for i in range( numGames ):
beQuiet = i < numTraining
if beQuiet:
# Suppress output and graphics
import textDisplay
gameDisplay = textDisplay.NullGraphics()
rules.quiet = True
else:
gameDisplay = display
rules.quiet = False
g = rules.newGame( layout, agents, gameDisplay, length, muteAgents, catchExceptions )
g.run()
if not beQuiet: games.append(g)
g.record = None
if record:
import time, cPickle, game
#fname = ('recorded-game-%d' % (i + 1)) + '-'.join([str(t) for t in time.localtime()[1:6]])
#f = file(fname, 'w')
components = {'layout': layout, 'agents': [game.Agent() for a in agents], 'actions': g.moveHistory, 'length': length, 'redTeamName': redTeamName, 'blueTeamName':blueTeamName }
#f.close()
print "recorded"
g.record = cPickle.dumps(components)
with open('replay','wb') as f:
f.write(g.record)
if numGames > 0:
scores = [game.state.data.score for game in games]
redWinRate = [s > 0 for s in scores].count(True)/ float(len(scores))
blueWinRate = [s < 0 for s in scores].count(True)/ float(len(scores))
print 'Average Score:', sum(scores) / float(len(scores))
print 'Scores: ', ', '.join([str(score) for score in scores])
print 'Red Win Rate: %d/%d (%.2f)' % ([s > 0 for s in scores].count(True), len(scores), redWinRate)
print 'Blue Win Rate: %d/%d (%.2f)' % ([s < 0 for s in scores].count(True), len(scores), blueWinRate)
print 'Record: ', ', '.join([('Blue', 'Tie', 'Red')[max(0, min(2, 1 + s))] for s in scores])
return games
if __name__ == '__main__':
"""
The main function called when pacman.py is run
from the command line:
> python capture.py
See the usage string for more details.
> python capture.py --help
"""
options = readCommand( sys.argv[1:] ) # Get game components based on input
runGames(**options)
# import cProfile
# cProfile.run('runGames( **options )', 'profile')
| [
"[email protected]"
] | |
769d5f7fe77535a00d206add921664f0c7bd61e3 | 63913055f86d625786196a880c1d8f82b1b569d5 | /haddScript.py | 5089d9ba93c7f102a326c468bfac108330b3ece4 | [] | no_license | mroguljic/X_YH_4b | 328791db1449d5ddef8495df3e0ad8a30aeefba3 | 78ba7980058bd7759354182c685baf605a4e8a8d | refs/heads/master | 2022-11-10T15:09:56.836525 | 2021-09-29T14:35:46 | 2021-09-29T14:35:46 | 248,929,562 | 0 | 3 | null | 2020-12-23T08:18:44 | 2020-03-21T07:44:38 | Python | UTF-8 | Python | false | false | 962 | py | import os
directories=[d for d in os.listdir(os.getcwd()) if os.path.isdir(d)]
if("semi" in os.getcwd()):
semil = True
variations = ["nom","sfUp","sfDown","jesUp","jesDown","jerUp","jerDown"] #CR
else:
semil = False
variations = ["nom","jesUp","jesDown","jerUp","jerDown","jmsUp","jmsDown","jmrUp","jmrDown","MJYrotUp","MJYrotDown"]
for d in directories:
if("MX" in d):
continue
for variation in variations:
if("MJYrot" in variation and not "TTbar" in d):
continue
if(semil):
if(variation!="nom" and "Single" in d):
continue
else:
if(variation!="nom" and not ("MX" in d or "TTbar" in d)):
continue
if("MX" in d):
cmd = "mv {0}/{0}_0_{1}.root {0}_{1}.root".format(d,variation)
else:
cmd = "hadd -f {0}_{1}.root {0}/*{1}*root".format(d,variation)
print(cmd)
os.system(cmd)
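# Example of a generated command (a sketch): for d="TTbar" and variation="nom"
# the hadd branch runs:
#   hadd -f TTbar_nom.root TTbar/*nom*root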
| [
"[email protected]"
] | |
a5982b982057004ee234ce100b0ddd970474ad7f | dcf8f4326d4a535c33316df5135fdfabaede5785 | /roles/mafia.py | 0d56a43e846f51b25e235871790c61e17a3b0697 | [] | no_license | andrewbarry1/MafiaEngine | bcc27dc0e7e6d20118bd173880d5a14a700da21a | c03e79a4fe3748128f63988350d2d21bb767623b | refs/heads/master | 2020-04-11T01:14:02.479049 | 2018-12-04T00:09:45 | 2018-12-04T00:09:45 | 124,316,789 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | from role import Role
from visit import Visit
from enums import *
from voting import vote
class Mafia(Role):
def __init__(self):
self.name = "Mafia"
self.alignment = Alignment.mafia
self.night_chat = Meeting.mafnight
def __str__(self):
return "Mafia"
def get_night_vote(self):
return self.room.gen_vote_list(self.player, "not mafia")
def process_night_vote(self, votes):
(counts, target) = vote(votes)
if target == VOTE_NL: # tied vote or nl - same thing
return []
else:
return [Visit(self.player.player_number, target, self.nightkill, VisitPriority.Kill)]
def nightkill(self, visitor, visited):
self.room.players[visited].kill()
def check_win(self, alive):
n_maf = len([p for p in alive if p.role.alignment == Alignment.mafia])
n_town = len(alive) - n_maf
if (n_maf >= n_town):
return "Mafia"
else:
return None
| [
"[email protected]"
] | |
fe0bd2ceaf4493e021a319b6698c83f78f07f01e | dce2e3b11804fdb141feaa48299fa8cd751f0e5d | /2.两数相加.py | ad357aa9fedb490291ad6f56660209fd8858a61c | [] | permissive | Cosmos-Break/leetcode | bf056efb6f3eb6448df7fb3fc4869992a3e7eb48 | 9f5f3d24e35b0a482ed40594ea665e9068324dcc | refs/heads/main | 2023-06-26T04:29:25.135826 | 2021-07-19T12:29:29 | 2021-07-19T12:29:29 | 293,397,157 | 0 | 0 | MIT | 2020-09-07T01:55:39 | 2020-09-07T01:55:38 | null | UTF-8 | Python | false | false | 785 | py | #
# @lc app=leetcode.cn id=2 lang=python3
#
# [2] 两数相加
#
# @lc code=start
# Definition for singly-linked list (LeetCode's current stub, which allows
# the no-argument ListNode() calls used below):
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
head = ListNode(l1.val + l2.val)
cur = head
while l1.next or l2.next:
l1 = l1.next if l1.next else ListNode()
l2 = l2.next if l2.next else ListNode()
cur.next = ListNode(l1.val + l2.val + cur.val // 10)
cur.val = cur.val % 10
cur = cur.next
if cur.val >= 10:
cur.next = ListNode(cur.val // 10)
cur.val = cur.val % 10
return head
# @lc code=end
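# Worked example: 342 + 465 = 807 with digits stored in reverse order:
#   l1 = 2 -> 4 -> 3, l2 = 5 -> 6 -> 4
#   head starts as 7; each later node adds the carry (cur.val // 10) from the
#   previous node before that node is reduced with cur.val % 10, giving
#   7 -> 0 -> 8.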
| [
"[email protected]"
] | |
d543e5a724ba3f971cadba8221431be97ee26b64 | 7bb6e5319aa544d2d45e2b2f241c13d57f83c1b8 | /PyLove 1.5 Objective programming 1/task 5.2.py | e8d1a0ffeb511f653d62a88814842f87faf819e7 | [] | no_license | KFranciszek/PyLove-Python-Workshops | 64bf52c3e9237f7b90ff4024853c00013f563b85 | 96c0bc237e41dea637b3e58eecdcad9607c26818 | refs/heads/master | 2021-05-02T09:15:42.014449 | 2017-12-05T19:16:05 | 2017-12-05T19:16:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # Create a constructor (__init__) for a class "Human", which will take a name, height and weight.
class Human:
def __init__(self, name, height, weight):
self.name = name
self.height = height
self.weight = weight
human = Human("John", "175", "70")
print(human.name) | [
"[email protected]"
] | |
e94dcc4d0f738803109dfc73df5e2aa1f11e6c57 | f1562ff1c4b03742b3595cded685378f2bbec4e6 | /src/run.py | bf9ebdedb992a949f530439682a44401bf31ae56 | [] | no_license | bilguunchinzorigEPFL/temtseen | 8a95178146127e3d3255f8d94c4a115c495e4833 | 2ba8e624f41ee8c4bd180719b12ea5e7d4604f5b | refs/heads/master | 2022-04-13T11:35:23.372802 | 2020-04-03T12:48:15 | 2020-04-03T12:48:15 | 249,346,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,072 | py | import os
import resource
import sys
import time
import warnings
from datetime import datetime
import pandas as pd
import dataHandler as data
from deltaRegressors import GetRegressor
from evaluators import evaluate
from featureGenerators import GetFeatureGroup, GetAllFeatureGroupNames
warnings.filterwarnings("ignore")
def memory_limit():
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (int(get_memory() * 1024 * 0.9), hard))
def get_memory():
with open('/proc/meminfo', 'r') as mem:
free_memory = 0
for i in mem:
sline = i.split()
if str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
free_memory += int(sline[1])
return free_memory
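# A sketch of the cap: /proc/meminfo reports kilobytes, so memory_limit() sets
# the process address-space limit to 90% of currently-free memory in bytes
# (free_kB * 1024 * 0.9).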
memory_limit()
# configs
runName = "testRun"
timestamp = str(datetime.now())
featureGroupNames = GetAllFeatureGroupNames()
deltaRegressorNames = [
# 'MeanRegressor',
# 'MeanConfRegressor',
# 'LassoLars',
# 'GaussianProcessRegressor',
# 'LinearRegression',
'RandomForestRegressor',
# 'GradientBoostingRegressor',
# 'LinearSVR',
# 'MLPRegressor',
# 'ElasticNet',
]
regressorMode = "unit"
savePath = "../model/" + runName + "/"
data.dataName = "testM"
# init
print(f"Loading Data From: {data.dataName}")
featureGroups = [GetFeatureGroup(fg) for fg in featureGroupNames]
deltaRegressors = [GetRegressor(dr, regressorMode) for dr in deltaRegressorNames]
if not os.path.exists(savePath):
os.mkdir(savePath)
with open(savePath + "results.csv", "w") as myfile:
myfile.write(
f"path, timestamp, trainTdError, trainTsError, evalTdError, evalTsError, featureGroupNames, data\n")
# training
trainFeatures = pd.concat([fg.getFeatures("train") for fg in featureGroups], axis=1)
trainX = data.loadX('train')
trainTimeDelta = data.calcTimeDelta(trainX)
evalFeatures = pd.concat([fg.getFeatures("eval") for fg in featureGroups], axis=1)
evalX = data.loadX('eval')
evalTimeDelta = data.calcTimeDelta(evalX)
for i, deltaRegressorName in enumerate(deltaRegressorNames):
sys.stdout.write(f"Training Regressor: {deltaRegressorName} : ")
sys.stdout.flush()
s = time.time()
#deltaRegressors[i].load(savePath + deltaRegressorName + "T2020-03-26 17:18:13.117775")
deltaRegressors[i].train(trainFeatures, trainTimeDelta)
print(f"Time : {time.time() - s}")
trainTdError, trainTsError = evaluate(
"Train " + deltaRegressorName,
deltaRegressors[i],
trainFeatures,
trainTimeDelta,
trainX
)
evalTdError, evalTsError = evaluate(
"Eval " + deltaRegressorName,
deltaRegressors[i],
evalFeatures,
evalTimeDelta,
evalX
)
deltaRegressors[i].save(savePath + deltaRegressorName + "T" + timestamp)
with open(savePath + "results.csv", "a") as myfile:
myfile.write(
f"{savePath + deltaRegressorName} T {timestamp}, {timestamp}, {trainTdError}, {trainTsError}, {evalTdError}, {evalTsError}, {featureGroupNames}, {data.dataName}\n")
| [
"[email protected]"
] | |
cdfa3f2f6875bdb4d6aad570e6f6cb1c947e4d52 | 7dab00e63b7193010344a0f05e0cc641d7091f5f | /students/Zhengtang_Yang/lesson06/calculator/calculator/calculator.py | 088b150c43dd11e6331407d9bf0eb71dbda3174f | [] | no_license | aurel1212/Sp2018-Online | 9307e872c14c5ddd795bdc738b325de087895d55 | 263685ca90110609bfd05d621516727f8cd0028f | refs/heads/master | 2020-04-05T18:35:49.761140 | 2018-06-19T18:24:27 | 2018-06-19T18:24:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | """This module demonstrates documentation as specified """
from .exceptions import InsufficientOperands
class Calculator(object):
"""
Calculator Class
"""
def __init__(self, adder, subtracter, multiplier, divider):
self.adder = adder
self.subtracter = subtracter
self.multiplier = multiplier
self.divider = divider
self.stack = []
def enter_number(self, number):
# self.stack.insert(0, number)
self.stack.append(number)
def _do_calc(self, operator):
try:
result = operator.calc(self.stack[0], self.stack[1])
except IndexError:
raise InsufficientOperands
self.stack = [result]
return result
def add(self):
return self._do_calc(self.adder)
def subtract(self):
return self._do_calc(self.subtracter)
def multiply(self):
return self._do_calc(self.multiplier)
def divide(self):
return self._do_calc(self.divider)
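# Example usage (a sketch - Adder/Subtracter etc. are hypothetical stand-ins
# for the lesson's operator objects; any object with a calc(x, y) method works):
#
#   calc = Calculator(Adder(), Subtracter(), Multiplier(), Divider())
#   calc.enter_number(10)
#   calc.enter_number(4)
#   calc.subtract()      # -> 6; the stack collapses to [6]
#   calc.enter_number(2)
#   calc.multiply()      # -> 12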
| [
"[email protected]"
] | |
9442061d1c5d28bd09a835998a2e53cfa07e48e2 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/Scaleform/daapi/view/meta/EventBoardsAwardsOverlayMeta.py | 219372a39b6b37e617c2e86dffba37acfa9ed26a | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 736 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/EventBoardsAwardsOverlayMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent
class EventBoardsAwardsOverlayMeta(BaseDAAPIComponent):
def changeFilter(self, id):
self._printOverrideError('changeFilter')
def as_setHeaderS(self, data):
return self.flashObject.as_setHeader(data) if self._isDAAPIInited() else None
def as_setVehicleS(self, data):
return self.flashObject.as_setVehicle(data) if self._isDAAPIInited() else None
def as_setDataS(self, data):
return self.flashObject.as_setData(data) if self._isDAAPIInited() else None
| [
"[email protected]"
] | |
6f1f8161ba95d3088ba7f50b93a121664fb1a322 | 57abd17391c6ef691509dae512c102f6635dab9b | /tensorflow_datasets/scripts/create_new_dataset.py | 6e57f703111ebe42c66b6fd4f7d3415e908e0bac | [
"Apache-2.0"
] | permissive | SinghKislay/datasets | 434e50eb3b8584849192f3cabe7305429cc62363 | bc09dd59826975f57c861da4bea23fa5d63d61cf | refs/heads/master | 2020-05-02T22:27:34.771036 | 2019-04-10T18:14:41 | 2019-04-10T18:14:41 | 176,097,632 | 0 | 0 | Apache-2.0 | 2019-03-17T12:25:56 | 2019-03-17T12:25:55 | null | UTF-8 | Python | false | false | 6,919 | py | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate the minimal source code for a new dataset.
python -m tensorflow_datasets.scripts.create_new_dataset \
--dataset dataset_name \
--type dataset_type
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from tensorflow.io import gfile
from tensorflow_datasets.core import naming
from tensorflow_datasets.core.utils import py_utils
FLAGS = flags.FLAGS
_DATASET_TYPE = ['image', 'video', 'audio', 'text', 'structured', 'translate']
flags.DEFINE_string('tfds_dir', None, 'Root directory of tfds (auto-computed)')
flags.DEFINE_string('dataset', None, 'Dataset name')
flags.DEFINE_enum('type', None, _DATASET_TYPE, 'Dataset type')
_HEADER = """\
\"""{TODO}: Add a description here.\"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""
_DATASET_DEFAULT_IMPORTS = """\
import tensorflow_datasets as tfds\n
"""
_DATASET_TEST_DEFAULTS_IMPORTS = """\
from tensorflow_datasets import testing
from tensorflow_datasets.{dataset_type} import {dataset_name}
"""
_CITATION = """\
# {TODO}: BibTeX citation
_CITATION = \"""
\"""\n
"""
_DESCRIPTION = """\
# {TODO}:
_DESCRIPTION = \"""
\"""\n
"""
_DATASET_DEFAULTS = """\
class {dataset_cls}(tfds.core.GeneratorBasedBuilder):
\"""{TODO}: Short description of my dataset.\"""
# {TODO}: Set up version.
VERSION = tfds.core.Version('0.1.0')
def _info(self):
# {TODO}: Specifies the tfds.core.DatasetInfo object
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({{
# These are the features of your dataset like images, labels ...
}}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=(),
# Homepage of the dataset for documentation
urls=[],
citation=_CITATION,
)
def _split_generators(self, dl_manager):
# {TODO}: Downloads the data and defines the splits
# dl_manager is a tfds.download.DownloadManager that can be used to
# download and extract URLs
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
# {TODO}: Tune the number of shards such that each shard
# is < 4 GB.
num_shards=10,
# These kwargs will be passed to _generate_examples
gen_kwargs={{}},
),
]
def _generate_examples(self):
# {TODO}: Yields examples from the dataset
yield {{}}\n
"""
_DATASET_TEST_DEFAULTS = """\
class {dataset_cls}Test(testing.DatasetBuilderTestCase):
# {TODO}:
DATASET_CLASS = {dataset_name}.{dataset_cls}
SPLITS = {{
"train": 3, # Number of fake train example
"test": 1, # Number of fake test example
}}
# If you are calling `download/download_and_extract` with a dict, like:
# dl_manager.download({{'some_key': 'http://a.org/out.txt', ...}})
# then the tests needs to provide the fake output paths relative to the
# fake data directory
# DL_EXTRACT_RESULT = {{'some_key': 'output_file1.txt', ...}}
if __name__ == "__main__":
testing.test_main()
"""
_CHECKSUM_FILE = """\
# {TODO}: If your dataset downloads files, then the checksums will be
# automatically added here when running the download_and_prepare script
# with --register_checksums.
"""
def create_dataset_file(root_dir, data):
"""Create a new dataset from a template."""
file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}.py')
context = (
_HEADER + _DATASET_DEFAULT_IMPORTS + _CITATION
+ _DESCRIPTION + _DATASET_DEFAULTS
)
with gfile.GFile(file_path.format(**data), 'w') as f:
f.write(context.format(**data))
def add_the_init(root_dir, data):
"""Append the new dataset file to the __init__.py."""
init_file = os.path.join(root_dir, '{dataset_type}', '__init__.py')
context = (
'from tensorflow_datasets.{dataset_type}.{dataset_name} import '
'{dataset_cls} # {TODO} Sort alphabetically\n'
)
with gfile.GFile(init_file.format(**data), 'a') as f:
f.write(context.format(**data))
def create_dataset_test_file(root_dir, data):
"""Create the test file associated with the dataset."""
file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}_test.py')
context = (
_HEADER + _DATASET_TEST_DEFAULTS_IMPORTS +
_DATASET_TEST_DEFAULTS)
with gfile.GFile(file_path.format(**data), 'w') as f:
f.write(context.format(**data))
def create_fake_data(root_dir, data):
fake_examples_dir = os.path.join(
root_dir, 'testing', 'test_data', 'fake_examples', '{dataset_name}')
fake_examples_dir = fake_examples_dir.format(**data)
gfile.makedirs(fake_examples_dir)
fake_path = os.path.join(
fake_examples_dir, 'TODO-add_fake_data_in_this_directory.txt')
with gfile.GFile(fake_path, 'w') as f:
f.write('{TODO}: Add fake data in this directory'.format(**data))
def create_checksum_file(root_dir, data):
checksum_path = os.path.join(root_dir, 'url_checksums', '{dataset_name}.txt')
with gfile.GFile(checksum_path.format(**data), 'w') as f:
f.write(_CHECKSUM_FILE.format(**data))
def main(_):
dataset_name = FLAGS.dataset
dataset_type = FLAGS.type
root_dir = FLAGS.tfds_dir
if not root_dir:
root_dir = py_utils.tfds_dir()
data = dict(
dataset_name=dataset_name,
dataset_type=dataset_type,
dataset_cls=naming.snake_to_camelcase(dataset_name),
TODO='TODO({})'.format(dataset_name),
)
create_dataset_file(root_dir, data)
add_the_init(root_dir, data)
create_dataset_test_file(root_dir, data)
create_fake_data(root_dir, data)
create_checksum_file(root_dir, data)
print(
'Dataset generated in {}\n'
'You can start with searching TODO({}).\n'
'Please check this '
'`https://github.com/tensorflow/datasets/blob/master/docs/add_dataset.md`'
'for details.'.format(root_dir, dataset_name)
)
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
78ce0f7dcfae56d27b83005282d451663d29798d | a11d83fced34854664fac72e18d48fde6aa967e4 | /0x02-python-import_modules/102-magic_calculation.py | 0162921f0da03b752aab68a8227e86622fb4338e | [] | no_license | afarizap/holbertonschool-higher_level_programming | ffe0bf1440726c952f4dd28b908eabc4ccb5225b | ad39e58f9cb20cba4b9e2c14075f216097588f47 | refs/heads/master | 2023-03-30T15:39:35.184484 | 2021-03-22T22:55:24 | 2021-03-22T22:55:24 | 259,437,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | #!/usr/bin/python3
def magic_calculation(a, b):
from magic_calculation_102 import add, sub
if a < b:
c = add(a, b)
for i in range(4, 6):
c = add(c, i)
return c
return sub(a, b)
if __name__ == '__main__':
import dis
dis.dis(magic_calculation)
| [
"[email protected]"
] | |
2fedba0cbdc8a5b29280723b6940c2f71a7bda36 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03546/s442051818.py | 0f991e8c96376c793a2bab1a38286d3a650be9e2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | h, _ = map(int, input().split())
r = range(10)
c = [[int(i) for i in input().split()] for _ in r]
for k in r:
for i in r:
for j in r:
c[i][j] = min(c[i][j], c[i][k] + c[k][j])
else:
a = [[int(i) for i in input().split()] for _ in range(h)]
print(sum(c[i][1] for i in sum(a, []) if i != -1)) | [
"[email protected]"
] | |
038a56b6976ac7b4e464d15987f277155fce3956 | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200703_python1/day13_py200814/output_1.py | 5599e7b1ca607505095d6c9b8c9ce7737df9672c | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | """
output format()
output formatting with placeholders
string.format()
string template
placeholder
"""
x = 1 + 3*4
y = 2 + 5*6
# not recommended
print('x=', x, ',', 'y=', y)
# recommended
print("x={} , y={}")
print("x={} , y={}".format(x, y))
print("x={},y={}".format(x, y))
print("x={}, y={}".format(x, y))
| [
"[email protected]"
] | |
da82e269a3a1dfb6e360d2f25b7e3108dd81eb6f | 261a12381f07cebfdea7a546fc0337f70a116596 | /scripts/deploy.py | 6c499b800bf324774211c3b95b592d32d44d2964 | [] | no_license | MatthewTang/brownie-simple-storage | 678ca4dd3107a90628ef333aecaba57d94f66a73 | 6fd12620306fcb674a84e62212775d089853ff82 | refs/heads/main | 2023-08-28T05:40:07.014041 | 2021-10-07T09:06:32 | 2021-10-07T09:06:32 | 414,139,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | from brownie import accounts, config, SimpleStorage, network
# import os
def deploy_simple_storage():
"""
# ganache
account = accounts[0]
print(account)
# brownie
account = accounts.load("freecodecamp-account")
print(account)
# .env file
# account = accounts.add(os.getenv("PRIVATE_KEY")) # (?) account not same
account = accounts.add(config["wallets"]["from_key"])
print(account)
"""
account = get_account()
# Transact/ Call ? Brownie automatically knows
simple_storage = SimpleStorage.deploy({"from": account})
store_value = simple_storage.retrieve()
print(store_value)
transaction = simple_storage.store(15, {"from": account})
transaction.wait(1)
updated_store_value = simple_storage.retrieve()
print(updated_store_value)
def get_account():
if network.show_active() == "development":
return accounts[0]
else:
return accounts.add(config["wallets"]["from_key"])
def main():
deploy_simple_storage()
| [
"[email protected]"
] | |
37d56eb642affafa7f89e93d58f25e90355edf99 | c483e2d8ace057eb493377dd916b7aa65054424c | /Module9/definitions/set_ops.py | b5563417d84d9f5566170e0d0b442b8fd184a97b | [] | no_license | puckdisc/Python | 204cf379f23ff7129f6b9e7a56a54012ff4d528c | e215c6922dacd7dae29aae982a013e7b57680cae | refs/heads/master | 2023-02-09T05:01:45.645266 | 2021-01-02T03:14:28 | 2021-01-02T03:14:28 | 293,377,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | def print_set(hockey_set):
for x in hockey_set:
print(x)
| [
"[email protected]"
] | |
3a2ef9fe9d1ee5ac10f8616a6a5a5e7696776e60 | da85c029410ee7d0c7474269d68f568caabb7e5d | /PrimeNo.py | f5fcf0a49da74b5d4176944f5450ceb4d164fd59 | [] | no_license | apoorvasrivastava98/Python | 65f561c90a24c2a6c21cf0c31808a45c7561320c | 1c206fd059d912f1037d5c2fa26b447c574daa6d | refs/heads/master | 2021-01-15T00:59:37.465295 | 2021-01-09T08:47:46 | 2021-01-09T08:47:46 | 242,821,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | def prime(num):
c=0
for i in range (1,num+1):
if(num % i == 0):
c=c+1
if (c == 2):
print("Prime number")
else:
print("Not prime number")
prime(15)
| [
"[email protected]"
] | |
288a88fbeb711f83a092c56afc5ce0b632d920e7 | 70a5391312947e5e80a077e4c83d79488542f9e8 | /test/daplink_board.py | 8e02fda12481ce07fa56deedcbcd5edb33ed2180 | [
"Apache-2.0"
] | permissive | c1728p9/DAPLink | c6fe99c75d5be4b18529bc2cee2717d704592b38 | 8e14abbfaeeffab78d7307e22445efaa58501411 | refs/heads/master | 2021-08-15T12:55:58.132453 | 2016-03-18T21:23:40 | 2016-03-18T21:23:40 | 54,351,174 | 4 | 1 | null | 2016-03-21T01:39:36 | 2016-03-21T01:36:22 | C | UTF-8 | Python | false | false | 23,460 | py | #
# DAPLink Interface Firmware
# Copyright (c) 2009-2016, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import os
import re
import time
import subprocess
import sys
import binascii
import itertools
import mbed_lstools
import info
import test_daplink
from test_info import TestInfoStub
from intelhex import IntelHex
from pyOCD.board import MbedBoard
FILE_IGNORE_PATTERN_LIST = [
re.compile("\\._\\.Trashes")
]
# This prevents the following error message from getting
# displayed on windows if the mbed dismounts unexpectedly
# during a transfer:
# There is no disk in the drive. Please insert a disk into
# drive \Device\<Harddiskx>\<rdrive>
def disable_popup():
if sys.platform.startswith("win"):
# pylint: disable=invalid-name
import ctypes
SEM_FAILCRITICALERRORS = 1
GetErrorMode = \
ctypes.windll.kernel32.GetErrorMode # @UndefinedVariable
GetErrorMode.restype = ctypes.c_uint
GetErrorMode.argtypes = []
SetErrorMode = \
ctypes.windll.kernel32.SetErrorMode # @UndefinedVariable
SetErrorMode.restype = ctypes.c_uint
SetErrorMode.argtypes = [ctypes.c_uint]
err_mode = GetErrorMode()
err_mode |= SEM_FAILCRITICALERRORS
SetErrorMode(err_mode)
disable_popup()
def get_all_attached_daplink_boards():
all_boards = []
lstools = mbed_lstools.create()
mbed_list = lstools.list_mbeds()
for mbed in mbed_list:
unique_id = mbed['target_id']
board = DaplinkBoard(unique_id)
all_boards.append(board)
return all_boards
def _unique_id_to_host_id(unique_id):
"""Return the chip id unique to the daplink host procesor
Unique ID has the following fomat
Board ID - 4 bytes
Version - 4 bytes
Host ID - Everything else
"""
return unique_id[8:8 + 32]
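# Example (hypothetical id): for a 48-char unique id the board id is chars
# [0:4], the version chars [4:8], and the host id the 32 chars at [8:40];
# the trailing 8 chars hold the HIC id used elsewhere in this module.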
def _get_board_endpoints(unique_id):
"""Return a tuple of unique_id, serial_port, mount_point"""
lstools = mbed_lstools.create()
mbed_list = lstools.list_mbeds()
host_id = _unique_id_to_host_id(unique_id)
for mbed in mbed_list:
mbed_unique_id = mbed['target_id']
mbed_serial_port = mbed['serial_port']
mbed_mount_point = mbed['mount_point']
mbed_host_id = _unique_id_to_host_id(mbed_unique_id)
if mbed_host_id == host_id:
return mbed_unique_id, mbed_serial_port, mbed_mount_point
return None
def _ranges(i):
for _, b in itertools.groupby(enumerate(i), lambda x_y: x_y[1] - x_y[0]):
b = list(b)
yield b[0][1], b[-1][1]
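# Example: list(_ranges([1, 2, 3, 7, 8])) -> [(1, 3), (7, 8)]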
def _parse_kvp_file(file_path, parent_test=None):
"""Parse details.txt and return True if successful"""
test_info = None
kvp = {}
if parent_test is not None:
test_info = parent_test.create_subtest('parse_kvp_file')
line_format = re.compile("^([a-zA-Z0-9 ]+): +(.+)$")
if not os.path.isfile(file_path):
return kvp
with open(file_path, "r") as file_handle:
for line in file_handle:
if len(line) <= 0:
if test_info is not None:
test_info.failure("Empty line in %s" % file_path)
continue
if line[0] == '#':
# The line is a comment
continue
match = line_format.match(line)
if match is None:
if test_info is not None:
test_info.failure("Invalid line: %s" % line)
continue
key = match.group(1)
key = key.lower().replace(" ", "_")
value = match.group(2)
value = value.lower()
value = value.strip()
if key in kvp:
if test_info is not None:
test_info.failure("Duplicate key %s" % key)
continue
kvp[key] = value
return kvp
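# Example: a details.txt line "DAPLink Mode: interface" parses to
# {'daplink_mode': 'interface'}: keys are lower-cased with spaces replaced by
# underscores, values are lower-cased and stripped.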
def _compute_crc(hex_file_path):
# Read in hex file
new_hex_file = IntelHex()
new_hex_file.padding = 0xFF
new_hex_file.fromfile(hex_file_path, format='hex')
# Get the starting and ending address
addresses = new_hex_file.addresses()
addresses.sort()
start_end_pairs = list(_ranges(addresses))
regions = len(start_end_pairs)
assert regions == 1, ("Error - only 1 region allowed in "
"hex file %i found." % regions)
start, end = start_end_pairs[0]
# Compute checksum over the range (don't include data at location of crc)
size = end - start + 1
crc_size = size - 4
data = new_hex_file.tobinarray(start=start, size=crc_size)
data_crc32 = binascii.crc32(data) & 0xFFFFFFFF
# Grab the crc from the image
embedded_crc32 = (((new_hex_file[end - 3] & 0xFF) << 0) |
((new_hex_file[end - 2] & 0xFF) << 8) |
((new_hex_file[end - 1] & 0xFF) << 16) |
((new_hex_file[end - 0] & 0xFF) << 24))
return data_crc32, embedded_crc32
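# Layout note (a sketch): the hex image must be one contiguous region whose
# last four bytes store, little-endian, the CRC32 of everything before them;
# a healthy image therefore has data_crc32 == embedded_crc32.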
class AssertInfo(object):
def __init__(self, file_name, line_number):
self._file = file_name
self._line = line_number
@property
def file(self):
return self._file
@property
def line(self):
return self._line
class DaplinkBoard(object):
MODE_IF = "interface"
MODE_BL = "bootloader"
# Keys for details.txt
KEY_UNIQUE_ID = "unique_id"
KEY_HIC_ID = "hic_id"
KEY_MODE = "daplink_mode"
KEY_BL_VERSION = "bootloader_version"
KEY_IF_VERSION = "interface_version"
KEY_GIT_SHA = "git_sha"
KEY_LOCAL_MODS = "local_mods"
KEY_USB_INTERFACES = "usb_interfaces"
KEY_BL_CRC = "bootloader_crc"
KEY_IF_CRC = "interface_crc"
def __init__(self, unique_id):
self.unique_id = unique_id
self.details_txt = None
self._mode = None
self._assert = None
self._check_fs_on_remount = False
self._manage_assert = False
self._update_board_info()
def __str__(self):
return "Name=%s Unique ID=%s" % (self.name, self.get_unique_id())
def get_unique_id(self):
return self.unique_id
def get_board_id(self):
return self.board_id
@property
def hic_id(self):
return self._hic_id
@property
def name(self):
if self.board_id in info.BOARD_ID_TO_BUILD_TARGET:
board_target = info.BOARD_ID_TO_BUILD_TARGET[self.board_id]
else:
board_target = "Unknown"
return board_target
def get_serial_port(self):
return self.serial_port
def get_mount_point(self):
return self.mount_point
def get_connected(self):
"""Check if the board is connected"""
return os.path.isdir(self.mount_point)
def get_failure_message(self):
"""Get the failure message from fail.txt
return None if there there is no failure
"""
msg = None
fail_file = self.get_file_path('FAIL.TXT')
if not self.get_connected():
raise Exception('Board not connected')
if os.path.isfile(fail_file):
with open(fail_file, 'rb') as fail_file_handle:
msg = fail_file_handle.read()
return msg
def get_assert_info(self):
"""Return an AssertInfo if an assert occurred, else None"""
return self._assert
def get_mode(self):
"""Return either MODE_IF or MODE_BL"""
assert ((self._mode is DaplinkBoard.MODE_BL) or
(self._mode is DaplinkBoard.MODE_IF))
return self._mode
def get_file_path(self, file_name):
"""Convenience function to the path to a file on the drive"""
return os.path.normpath(self.mount_point + os.sep + file_name)
def set_mode(self, mode, parent_test=None):
"""Set the mode to either MODE_IF or MODE_BL"""
assert ((mode is DaplinkBoard.MODE_BL) or
(mode is DaplinkBoard.MODE_IF))
if parent_test is None:
parent_test = TestInfoStub()
test_info = parent_test.create_subtest('set_mode')
current_mode = self.get_mode()
if current_mode is mode:
# No mode change needed
return
if mode is self.MODE_BL:
test_info.info("changing mode IF -> BL")
# Create file to enter BL mode
start_bl_path = self.get_file_path('START_BL.ACT')
with open(start_bl_path, 'wb') as _:
pass
# Create file to enter BL mode - Legacy
start_bl_path = self.get_file_path('START_BL.CFG')
with open(start_bl_path, 'wb') as _:
pass
elif mode is self.MODE_IF:
test_info.info("changing mode BL -> IF")
# Create file to enter IF mode
start_if_path = self.get_file_path('START_IF.ACT')
with open(start_if_path, 'wb') as _:
pass
# Create file to enter IF mode - Legacy
start_if_path = self.get_file_path('START_IF.CFG')
with open(start_if_path, 'wb') as _:
pass
else:
test_info.warning("Board is in unknown mode")
self.wait_for_remount(test_info)
new_mode = self.get_mode()
if new_mode != mode:
test_info.failure("Board in wrong mode: %s" % new_mode)
raise Exception("Could not change board mode")
def set_check_fs_on_remount(self, enabled):
assert isinstance(enabled, bool)
self._check_fs_on_remount = enabled
self.set_assert_auto_manage(enabled)
def set_assert_auto_manage(self, enabled):
assert isinstance(enabled, bool)
self.clear_assert()
self._manage_assert = enabled
def clear_assert(self):
assert_path = self.get_file_path("ASSERT.TXT")
if os.path.isfile(assert_path):
os.remove(assert_path)
self.wait_for_remount(TestInfoStub())
def run_board_test(self, parent_test):
test_daplink.daplink_test(self, parent_test)
def read_target_memory(self, addr, size, resume=True):
assert self.get_mode() == self.MODE_IF
with MbedBoard.chooseBoard(board_id=self.get_unique_id()) as board:
data = board.target.readBlockMemoryUnaligned8(addr, size)
board.uninit(resume)
return bytearray(data)
def test_fs(self, parent_test):
"""Check if the raw filesystem is valid"""
if sys.platform.startswith("win"):
test_info = parent_test.create_subtest('test_fs')
args = ["chkdsk", self.mount_point]
process = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.communicate(input='n\r\n') # Answer no if prompted
process.wait()
test_info.info('chkdsk returned %s' % process.returncode)
if process.returncode != 0:
test_info.failure('Disk corrupt')
# TODO - as a future improvement add linux and mac support
# Tests for the following:
# 1. Correct files present -TODO
# 2. Contents of file are valid ascii
# 3. Each line ends with \r\n
# 4. There is no whitespace at the end of the line
# 5. Each file ends with \r\n
def test_fs_contents(self, parent_test):
"""Check if the file contents are valid"""
test_info = parent_test.create_subtest('test_fs_contents')
non_ascii = r'[^\x20-\x7F\r\n]'
non_cr_lf = r'\r[^\n]|[^\r]\n'
trail_white = r'(?:\ \r|\ \n)'
end_of_file = r'\r\n$'
files = os.listdir(self.mount_point)
non_ascii_re = re.compile(non_ascii)
non_cr_lf_re = re.compile(non_cr_lf)
trail_white_re = re.compile(trail_white)
end_of_file_re = re.compile(end_of_file)
for filename in files:
filepath = self.get_file_path(filename)
if not os.path.isfile(filepath):
test_info.info("Skipping non file item %s" % filepath)
continue
skip = False
for pattern in FILE_IGNORE_PATTERN_LIST:
if pattern.match(filename):
skip = True
break
if skip:
continue
with open(filepath, 'rb') as file_handle:
file_contents = file_handle.read()
if non_ascii_re.search(file_contents):
test_info.failure("Non ascii characters in %s" % filepath)
elif non_cr_lf_re.search(file_contents):
test_info.failure("File has non-standard line endings %s" %
filepath)
elif trail_white_re.search(file_contents):
test_info.warning("File trailing whitespace %s" %
filepath)
elif end_of_file_re.search(file_contents) is None:
test_info.warning("No newline at end of file %s" %
filepath)
else:
test_info.info("File %s valid" % filepath)
self.test_details_txt(test_info)
def load_interface(self, filepath, parent_test):
"""Load an interface binary or hex"""
test_info = parent_test.create_subtest('load_interface')
self.set_mode(self.MODE_BL, test_info)
data_crc, crc_in_image = _compute_crc(filepath)
assert data_crc == crc_in_image, ("CRC in interface is wrong "
"expected 0x%x, found 0x%x" %
(data_crc, crc_in_image))
filename = os.path.basename(filepath)
with open(filepath, 'rb') as firmware_file:
data = firmware_file.read()
out_file = self.get_file_path(filename)
start = time.time()
with open(out_file, 'wb') as firmware_file:
firmware_file.write(data)
stop = time.time()
test_info.info("programming took %s s" % (stop - start))
self.wait_for_remount(test_info)
# Check the CRC
self.set_mode(self.MODE_IF, test_info)
if DaplinkBoard.KEY_IF_CRC not in self.details_txt:
test_info.failure("No interface CRC in details.txt")
return
details_crc = int(self.details_txt[DaplinkBoard.KEY_IF_CRC], 0)
test_info.info("Interface crc: 0x%x" % details_crc)
if data_crc != details_crc:
test_info.failure("Interface CRC is wrong")
def load_bootloader(self, filepath, parent_test):
"""Load a bootloader binary or hex"""
test_info = parent_test.create_subtest('load_bootloader')
self.set_mode(self.MODE_IF, test_info)
# Check image CRC
data_crc, crc_in_image = _compute_crc(filepath)
assert data_crc == crc_in_image, ("CRC in bootloader is wrong "
"expected 0x%x, found 0x%x" %
(data_crc, crc_in_image))
filename = os.path.basename(filepath)
with open(filepath, 'rb') as firmware_file:
data = firmware_file.read()
out_file = self.get_file_path(filename)
start = time.time()
with open(out_file, 'wb') as firmware_file:
firmware_file.write(data)
stop = time.time()
test_info.info("programming took %s s" % (stop - start))
self.wait_for_remount(test_info)
# Check the CRC
self.set_mode(self.MODE_IF, test_info)
if DaplinkBoard.KEY_BL_CRC not in self.details_txt:
test_info.failure("No bootloader CRC in details.txt")
return
details_crc = int(self.details_txt[DaplinkBoard.KEY_BL_CRC], 0)
test_info.info("Bootloader crc: 0x%x" % details_crc)
if data_crc != details_crc:
test_info.failure("Bootloader CRC is wrong")
def wait_for_remount(self, parent_test, wait_time=120):
test_info = parent_test.create_subtest('wait_for_remount')
elapsed = 0
start = time.time()
while os.path.isdir(self.mount_point):
if elapsed > wait_time:
raise Exception("Dismount timed out")
time.sleep(0.1)
elapsed += 0.1
stop = time.time()
test_info.info("unmount took %s s" % (stop - start))
start = time.time()
while True:
if self._update_board_info(False):
if os.path.isdir(self.mount_point):
break
if elapsed > wait_time:
raise Exception("Mount timed out")
time.sleep(0.1)
elapsed += 0.1
stop = time.time()
test_info.info("mount took %s s" % (stop - start))
# If enabled check the filesystem
if self._check_fs_on_remount:
self.test_fs(parent_test)
self.test_fs_contents(parent_test)
self.test_details_txt(parent_test)
if self._manage_assert:
if self._assert is not None:
test_info.failure('Assert on line %s in file %s' %
(self._assert.line, self._assert.file))
self.clear_assert()
def _update_board_info(self, exptn_on_fail=True):
"""Update board info
Update all board information variables that could
change when remounting or changing modes.
        Note - before this function is called, self.unique_id
        must already be set.
"""
endpoints = _get_board_endpoints(self.unique_id)
if endpoints is None:
if exptn_on_fail:
raise Exception("Could not update board info: %s" %
self.unique_id)
return False
self.unique_id, self.serial_port, self.mount_point = endpoints
# Serial port can be missing
assert self.unique_id is not None
assert self.mount_point is not None
self.board_id = int(self.unique_id[0:4], 16)
self._hic_id = int(self.unique_id[-8:], 16)
# Note - Some legacy boards might not have details.txt
details_txt_path = self.get_file_path("details.txt")
self.details_txt = _parse_kvp_file(details_txt_path)
self._parse_assert_txt()
self.mode = None
if DaplinkBoard.KEY_MODE in self.details_txt:
DETAILS_TO_MODE = {
"interface": DaplinkBoard.MODE_IF,
"bootloader": DaplinkBoard.MODE_BL,
}
mode_str = self.details_txt[DaplinkBoard.KEY_MODE]
self._mode = DETAILS_TO_MODE[mode_str]
else:
# TODO - remove file check when old bootloader have been
# updated
check_bl_path = self.get_file_path('HELP_FAQ.HTM')
check_if_path = self.get_file_path('MBED.HTM')
if os.path.isfile(check_bl_path):
self._mode = self.MODE_BL
elif os.path.isfile(check_if_path):
self._mode = self.MODE_IF
else:
raise Exception("Could not determine board mode!")
return True
def test_details_txt(self, parent_test):
"""Check that details.txt has all requied fields"""
test_info = parent_test.create_subtest('test_details_txt')
required_key_and_format = {
DaplinkBoard.KEY_UNIQUE_ID: re.compile("^[a-f0-9]{48}$"),
DaplinkBoard.KEY_HIC_ID: re.compile("^[a-f0-9]{8}$"),
DaplinkBoard.KEY_GIT_SHA: re.compile("^[a-f0-9]{40}$"),
DaplinkBoard.KEY_LOCAL_MODS: re.compile("^[01]{1}$"),
DaplinkBoard.KEY_USB_INTERFACES: re.compile("^.+$"),
DaplinkBoard.KEY_MODE: re.compile("(interface|bootloader)"),
}
optional_key_and_format = {
DaplinkBoard.KEY_BL_VERSION: re.compile("^[0-9]{4}$"),
DaplinkBoard.KEY_IF_VERSION: re.compile("^[0-9]{4}$"),
DaplinkBoard.KEY_BL_CRC: re.compile("^0x[a-f0-9]{8}$"),
DaplinkBoard.KEY_IF_CRC: re.compile("^0x[a-f0-9]{8}$"),
}
# 1. keys and values are alphanumeric
# 2. no duplicate keys
# 3. format is key : value
# 4. required keys are present
# 5. optional keys have the expected format
details_txt_path = self.get_file_path("details.txt")
details_txt = _parse_kvp_file(details_txt_path, test_info)
if not details_txt:
test_info.failure("Could not parse details.txt")
return
# Check for required keys
for key in required_key_and_format:
if key not in details_txt:
test_info.failure("Missing detail.txt entry: %s" % key)
continue
value = details_txt[key]
pattern = required_key_and_format[key]
if pattern.match(value) is None:
test_info.failure("Bad format detail.txt %s: %s" %
(key, value))
# Check format of optional values
for key in optional_key_and_format:
if key not in details_txt:
continue
value = details_txt[key]
pattern = optional_key_and_format[key]
if pattern.match(value) is None:
test_info.failure("Bad format detail.txt %s: %s" %
(key, value))
# Check details.txt contents
details_unique_id = None
details_hic_id = None
if DaplinkBoard.KEY_UNIQUE_ID in details_txt:
details_unique_id = details_txt[DaplinkBoard.KEY_UNIQUE_ID]
if DaplinkBoard.KEY_HIC_ID in details_txt:
details_hic_id = details_txt[DaplinkBoard.KEY_HIC_ID]
if details_unique_id is not None:
if details_unique_id != self.unique_id:
test_info.failure("Unique ID mismatch in details.txt "
"details.txt=%s, usb=%s" %
(details_unique_id, self.unique_id))
if details_hic_id is not None:
usb_hic = details_unique_id[-8:]
if details_hic_id != usb_hic:
test_info.failure("HIC ID is not the last 8 "
"digits of unique ID "
"details.txt=%s, usb=%s" %
(details_hic_id, usb_hic))
def _parse_assert_txt(self):
file_path = self.get_file_path("ASSERT.TXT")
if not os.path.isfile(file_path):
self._assert = None
return
assert_table = _parse_kvp_file(file_path)
assert "file" in assert_table
assert "line" in assert_table
self._assert = AssertInfo(assert_table["file"], assert_table['line'])
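# --- Illustrative sketch (not part of the original file) ---
# load_interface()/load_bootloader() above rely on a helper, _compute_crc,
# that returns both the CRC-32 computed over the image and the CRC stored in
# the image itself. The stand-alone sketch below shows the general idea; the
# 4-byte little-endian trailer layout is an assumption for illustration, not
# necessarily DAPLink's real image format.
if __name__ == "__main__":
    import struct
    import zlib

    def compute_crc_pair(image):
        """Return (crc over payload, crc stored in the 4-byte trailer)."""
        payload, trailer = image[:-4], image[-4:]
        data_crc = zlib.crc32(payload) & 0xFFFFFFFF
        stored_crc = struct.unpack("<I", trailer)[0]
        return data_crc, stored_crc

    demo = b"example firmware payload"
    demo += struct.pack("<I", zlib.crc32(demo) & 0xFFFFFFFF)
    data_crc, stored_crc = compute_crc_pair(demo)
    assert data_crc == stored_crc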
# ===== file: exercises/1901100012/d07/mymodule/stats_word.py (repo: shen-huang/selfteaching-python-camp) =====
en_text='''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain,it's a bad idea.
If the implementation is easy to explain,it's a good idea.
Namespaces are one honking great idea -- let's do more of those!
'''
# English word frequencies in descending order
def stats_text_en (text):
    eles=text.split()  # split the text on whitespace
words=[]
sys=".,-,*,!"
for elet in eles:
for s1 in sys:
elet=elet.replace(s1,' ')
if len(elet) and elet.isascii():
words.append(elet)
print(words)
print()
counter={}
word_set=set(words)
for word in word_set:
counter[word]=words.count(word)
print(counter)
print()
return sorted(counter.items(),key=lambda x:x[1],reverse=True)
# Chinese character frequencies in descending order
def stats_text_cn (text):
cn_characters=[]
for character in text:
        if '\u4e00'<=character<='\u9fa5':  # CJK unified ideographs range
cn_characters.append(character)
counter={}
cn_set=set(cn_characters)
for word in cn_set:
counter[word]=cn_characters.count(word)
return sorted(counter.items(),key=lambda x:x[1],reverse=True)
cn_text='''
Python之禅 by Tim Petters
美丽胜于丑陋
露骨比露骨好
简单总比复杂好
复杂比复杂好
平的比嵌套的好
稀疏比密密好
可读性很重要
特殊情况并不足以打破规则
尽管实用性胜过纯洁性
错误永远不应该悄悄过去
除非明确地沉默
面对橱柜,拒绝诱惑去猜测
应该有一种----最好只有一种----显而易见的方法来做到这一点
如果你不是荷兰人,那么这种方式在一开始可能并不明显
现在总比没有好
虽然从来没有比现在更好
如果实现很难解释,这是一个坏主意
如果实现容易解释,这是一个好主意
命名空间是一个很好的主意--让我们做更多的那些
'''
# output the merged word-frequency statistics
def stats_text(text):
return stats_text_en(text) + stats_text_cn(text)
#def stats_text(en_text,cn_text):
#print("输出合并词频统计结果\n",stats_text_en(en_text) + stats_text_cn(cn_text))
if __name__=='__main__':
en_result=stats_text_en(en_text)
cn_result=stats_text_cn(cn_text)
print("统计英文次数-->\n",en_result)
print("统计中文次数-->\n",cn_result)
# ===== file: Data Set/bug-fixing-5/caaa5d634f104a58a218ff663dfc926195e3acaf-<test_notify_sentry_app_and_plugin_with_same_slug>-bug.py (repo: wsgan001/PyFPattern) =====
def test_notify_sentry_app_and_plugin_with_same_slug(self):
event = self.get_event()
self.create_sentry_app(organization=event.organization, name='Notify', is_alertable=True)
plugin = MagicMock()
plugin.is_enabled.return_value = True
plugin.should_notify.return_value = True
rule = self.get_rule(data={
'service': 'notify',
})
with patch('sentry.plugins.plugins.get') as get_plugin:
get_plugin.return_value = plugin
results = list(rule.after(event=event, state=self.get_state()))
    assert len(results) == 2
    assert plugin.should_notify.call_count == 1
assert (results[0].callback is notify_sentry_app)
    assert results[1].callback is plugin.rule_notify
# ===== file: Day4Pt2.py (repo: anmaxwell/AdventofCode2020) =====
import cerberus
from cerberus import Validator
#read in the passport file
output = open("Passport.txt", "r")
#set validation rules using cerberus
schema = {'byr': {'required': True, 'type': 'integer', 'min': 1920, 'max': 2002},
'iyr': {'required': True, 'type': 'integer', 'min': 2010, 'max': 2020},
'eyr': {'required': True, 'type': 'integer', 'min': 2020, 'max': 2030},
'pid': {'required': True, 'type': 'string', 'minlength': 9, 'maxlength': 9},
'hgt': {'required': True, 'type': 'string', 'minlength': 4, 'maxlength': 5},
'hcl': {'required': True, 'type': 'string', 'minlength': 7, 'maxlength': 7,'regex': '^#[0-9a-f]{6}'},
'ecl': {'required': True, 'type': 'string', 'allowed': ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']},
'cid': {'required': False},
}
v = Validator(schema)
# set the count of valid passports to 0
validpass=0
#split the file into each passport
myarray = output.read().split("\n\n")
for line in myarray:
partline = line.split()
#create a blank dictionary
my_dict = {}
#take each item and put it in the dictionary
for item in partline:
(key, val) = item.split(':')
#ensure numeric fields converted
try:
my_dict[key] = int(val)
except ValueError:
my_dict[key] = val
#pid has leading zeroes so keep as string for length validation
if key == 'pid':
my_dict[key] = val
    # check the record validates against the schema
if v.validate(my_dict):
height = my_dict['hgt']
#check height validation
if height[-2:] == 'cm' and len(height)==5:
hgtval = int(height[:3])
if 150 <= hgtval <= 193:
validpass +=1
if height[-2:] == 'in' and len(height)==4:
hgtval = int(height[:2])
if 59 <= hgtval <= 76:
validpass +=1
print(validpass)
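# --- Illustrative sketch (not part of the original file) ---
# A hand-written record checked against the cerberus schema above; every
# field value here is made up for illustration.
sample = {'byr': 1980, 'iyr': 2015, 'eyr': 2025, 'pid': '000123456',
          'hgt': '170cm', 'hcl': '#aabbcc', 'ecl': 'brn'}
if v.validate(sample):
    print("sample record passes the schema checks")
else:
    print("sample record failed:", v.errors)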
"[email protected]"
] | |
82aed50d228c4f45ff91dae2b61a13a01bd7bd66 | 87eed57b13eba5fc10756e705821a2fc861a198e | /bfg9000/platforms/host.py | 65e15de4fb2bd1e765b040415f4de4a8f23600cb | [
"BSD-3-Clause"
] | permissive | jimporter/bfg9000 | 379ac2d9debb822defacc6c5e31d7b65468f0973 | 876966cc82b5520a7bddf88c2a57716c5579b5ba | refs/heads/master | 2023-08-04T06:29:44.669098 | 2023-08-01T03:13:46 | 2023-08-01T03:13:46 | 31,297,691 | 87 | 21 | BSD-3-Clause | 2020-08-06T06:38:10 | 2015-02-25T04:47:12 | Python | UTF-8 | Python | false | false | 334 | py | from .core import _get_platform_info, _platform_info, Platform
class HostPlatform(Platform):
pass
def platform_info(*args, **kwargs):
return _platform_info('host', *args, **kwargs)
def from_json(value):
return _get_platform_info('host', value['genus'], value['species'],
value['arch'])
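# --- Illustrative usage (not part of the original file) ---
# The relative import above means this module only runs as part of the
# bfg9000 package; in that context, calling platform_info() with no
# arguments is expected to describe the machine doing the build.
if __name__ == '__main__':
    print(platform_info())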
# ===== file: humantvf (repo: ac5tin/pyscripts) =====
#!/usr/bin/env python3
import fileinput
import humanize
totalSize = 0
noOfFiles = 0
for line in fileinput.input():
perm, owner, size, date, time, *filename = tuple(line.split())
totalSize+=int(size)
noOfFiles +=1
print ('{0} {1} {2:>9} {3} {4} {5}'.format(perm, owner, humanize.naturalsize(size, gnu=True), date, time, ' '.join(filename)))
print('Total Size: {0} | Number of Files:{1}'.format(humanize.naturalsize(totalSize,gnu=True),noOfFiles))
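# --- Illustrative usage (not part of the original file) ---
# The script post-processes `ls -l`-style listings read from stdin, e.g.:
#
#     ls -l | tail -n +2 | ./humantvf
#
# (`tail -n +2` drops the "total N" summary line, which has too few fields
# for the starred unpacking above and would raise ValueError.)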
# ===== file: src/main/python/codeforces/run_599A.py (repo: walkerwell/algorithm-data_structure) =====
d1,d2,d3=map(int,raw_input().split())
a=[]
minOne = min(d1,d2)
a.append(minOne*2+(d1+d2-minOne)*2)
a.append(d1+d2+d3)
a.append(minOne*2+d3*2)  # 2*min(d1,d2) + 2*d3: round trip via the inter-shop path
print min(a)
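# --- Worked example (not part of the original file) ---
# For an input like "10 20 30" (the Codeforces 599A sample):
#   2*min(10,20) + 2*max(10,20) = 60   # separate round trips to each shop
#   10 + 20 + 30                = 60   # the triangle route
#   2*min(10,20) + 2*30         = 80   # round trip using the inter-shop path
# min(a) is therefore 60.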
"[email protected]"
] | |
25b841f075f3629185a27195dc711edccfab72f3 | 518a8848d3d413e20cd6dc19ed53957aee4e53ea | /MNIST/venv/Scripts/pip3.7-script.py | 22ab3b42a2af4ea1aaf6e9d432aa4ceec174c453 | [] | no_license | zhangwenyuan919/ML_DL_assignment | f3774e83b6ea256fb6e5876b53fd2412bd988da2 | 46a08deb6056cbc5c19068acff8e1d334741429b | refs/heads/master | 2020-03-31T13:16:23.348662 | 2019-05-07T03:20:16 | 2019-05-07T03:20:16 | 152,248,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!E:\Pycharm_workstation\MNIST\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
# ===== file: meal_plans/views.py (repo: Ygritte0/Exercise) =====
from django.shortcuts import render
# Create your views here.
def index(request):
"""膳食规划的笔记"""
    return render(request, 'meal_plans/index.html')
# ===== file: server/mainapp/urls.py (repo: Rassoliny/geek-django) =====
from django.urls import re_path
from . import views as mainapp
app_name = 'products'
urlpatterns = [
re_path(r'^$', mainapp.main, name='main'),
re_path(r'products/(?P<pk>\d+)', mainapp.products, name='category'),
re_path(r'(?P<pk>\d+)', mainapp.product_detail, name='product'),
]
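# --- Illustrative sketch (not part of the original file) ---
# With this URLconf mounted under the 'products' app namespace, the named
# routes can be reversed roughly like so (the exact URL prefixes depend on
# how the project-level urls.py include()s this module -- an assumption):
#
#     from django.urls import reverse
#     reverse('products:main')                 # the index route
#     reverse('products:category', args=[3])   # matches r'products/(?P<pk>\d+)'
#     reverse('products:product', args=[7])    # matches r'(?P<pk>\d+)'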
# ===== file: scripts/src/LaunchCtrlProcess.py (repo: MrBrood/Stanford-Junior-self-driving-car) =====
import time
import fcntl
import select
import subprocess
import pty
import threading
import signal
import os, sys
from collections import deque
class LaunchCtrlEntry():
def start(self):
pass
def stop(self):
pass
def kill(self):
pass
def pty(self):
return None
class LaunchCtrlSleep(LaunchCtrlEntry):
def __init__(self, time):
self.time = time
def start(self):
time.sleep(self.time)
class LaunchCtrlProcess(LaunchCtrlEntry):
def __init__(self, path, command, plock=None, restart=False, output=True, env=None):
"""Arguments:
path -- Working directory for the executable
command -- Command line entry to execute, as a str
plock -- Optional common Lock, if multiple Processes will be launched,
this should be provided to prevent multiple concurrent forks
restart -- If true the process will be restarted if it exits for any
reason besides a stop or kill event
output -- Should the process even bother reading it's output
env -- A dict of options environment variables for the process
"""
self._plock = plock
self._stopevent = threading.Event()
self._startevent = threading.Event()
self._killevent = threading.Event()
self._lock = threading.Lock();
self._startCount = 0;
self._running = False;
self._pty = None
#self._stdout = deque(maxlen=1000)
self._returncodes = deque([])
self._path = path
self._command = command
self._restart = restart
self._output = output
self._env = env
self._process = LaunchCtrlThread(self)
self._process.start()
def start(self):
self._lock.acquire()
if not self._running and not self._startevent.isSet():
self._startevent.set()
self._lock.release()
def stop(self):
self._stopevent.set()
def kill(self):
self._killevent.set()
def command(self):
return self._command
def startCount(self):
self._lock.acquire()
count = self._startCount
self._lock.release()
return count
def running(self, value=None):
self._lock.acquire()
running = self._running
self._lock.release()
return running
def stopping(self):
if self._stopevent.isSet():
return True
else:
return False
def pty(self):
self._lock.acquire()
if isinstance(self._pty, int):
try:
self._pty = os.fdopen(self._pty, 'r')
fd = self._pty.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
except:
print "Could not attach to pty for reading"
# @todo: do something sane here
raise
pty = self._pty
self._lock.release()
return pty
def returncodes(self):
self._lock.acquire()
returncodes = self._returncodes
self._lock.release()
return returncodes
class LaunchCtrlThread(threading.Thread):
'''
    Separate thread to manage an individual process. Provides communication
    via a pty.
'''
def __init__(self, parent):
self.parent = parent
self.parent._lock.acquire()
self.startevent = parent._startevent
self.stopevent = parent._stopevent
self.killevent = parent._killevent
self.path = parent._path
self.env = parent._env
self.command = parent._command.split()
self.restart = parent._restart
self.output = parent._output
self.parent._lock.release()
self.outwriter = None
self.process = None
if self.output:
self.parent._lock.acquire()
(self.parent._pty, slave) = pty.openpty()
self.parent._lock.release()
try:
self.outwriter = os.fdopen(slave, 'w')
fd = self.outwriter.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
except:
print "Could not attach to pty for writing"
# @todo: Do something sane here
raise Exception
threading.Thread.__init__(self)
def run(self):
# !!Rely on the cwd argument to Popen to take care of this
#try:
# os.chdir(self.path)
#except:
# print "Could not cd to", self.path, "Aborting!"
# raise
if self.outwriter == None:
self.outwriter = open(os.devnull)
errfile = subprocess.STDOUT
while True:
try:
select.select([], [], [], 0.1)
except:
pass
# What to do if the process exists
if self.process != None:
self.process.poll()
if self.process.returncode != None:
linestr = "Stopped (-) "+self.command[0]+" ["+str(self.process.returncode)+"]\n"
print linestr,
self.parent._lock.acquire()
self.parent._returncodes.append(self.process.returncode)
self.parent._running = False
self.parent._lock.release()
self.process = None
if self.restart == True and not self.stopevent.isSet():
self.startevent.set()
self.stopevent.clear()
# What to do if we get a start process signal
if self.startevent.isSet():
if self.process == None:
try:
                        procenv = dict(os.environ.items() + (self.env.items() if self.env else []))
if self.parent._plock:
self.parent._plock.acquire()
self.process = subprocess.Popen(self.command, stderr=errfile,
stdout=self.outwriter, stdin=open(os.devnull), cwd=self.path,
preexec_fn=os.setsid, env=procenv)
if self.parent._plock:
self.parent._plock.release()
linestr = "Started (+) "+self.command[0]+"\n"
print linestr,
self.parent._lock.acquire()
self.parent._startCount += 1
self.parent._running = True
self.parent._lock.release()
except:
linestr = "Could not run binary", self.command[0], "Aborting!\n"
print linestr,
raise Exception
self.startevent.clear()
# What to do if we get a stop process signal
if self.stopevent.isSet() or self.killevent.isSet():
if self.process != None:
self.process.send_signal(signal.SIGINT)
count = 20
while count > 0:
try:
select.select([], [], [], 0.1)
except Exception, inst:
pass
self.process.poll()
if self.process.returncode == None:
count -= 1
else:
break
if count == 0:
linestr = "SIGKILL to process "+self.command[0]+"\n"
print linestr,
self.process.kill()
self.parent._lock.acquire()
self.parent._running = False
self.parent._lock.release()
else:
self.stopevent.clear()
if self.killevent.isSet():
linestr = "Killed "+self.command[0]+"\n"
print linestr,
return
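# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of driving LaunchCtrlProcess; the command, path and
# timings below are made up for illustration.
if __name__ == "__main__":
    proc = LaunchCtrlProcess("/tmp", "sleep 30", restart=False, env={})
    proc.start()                 # set the start event; the worker thread forks
    time.sleep(1.0)
    print("running: %s" % proc.running())
    proc.stop()                  # SIGINT first, SIGKILL after ~2 seconds
    time.sleep(1.0)
    proc.kill()                  # ask the worker thread itself to exit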
# ===== file: axigen/health.py (repo: drucko/axigen-pyton-api) =====
#!/usr/bin/env python
from axigen import axigenapi
from axigen.connection import ConnectionError as CErr, SocketError as SErr
class checks(object):
'''
    Service health-checks class
'''
__axigen = None
__errors = {}
def check_axigen(self, hostname='localhost', port=7000):
'''
Axigen service health checker
'''
status = None
try:
conn = axigenapi.axigenapi(hostname, port)
self.__axigen = conn
except (SErr, CErr), message:
self.__errors['axigen'] = str(message)
return(False)
status = self.__axigen.get_version()
if (status is False):
            self.__errors['axigen'] = \
                "Couldn't pull verification version info from " \
                "Axigen service"
return(False)
# All seems fine with Axigen service. Shall we introduce
# more detailed returned version value check here?
return(True)
def get_errors(self, service=None):
'''
Returns error(s) for all (if service not defined) or just for
one service.
'''
errors = None
if (service is not None):
try:
errors = self.__errors[service]
except KeyError:
errors = None
else:
errors = self.__errors
return(errors)
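# --- Illustrative usage (not part of the original file) ---
# A hedged sketch; the hostname and port below are just check_axigen's defaults.
if __name__ == "__main__":
    hc = checks()
    if hc.check_axigen('localhost', 7000):
        print("Axigen service looks healthy")
    else:
        print("Axigen check failed: %s" % hc.get_errors('axigen'))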
# ===== file: Pycharm Project/CancerClassification/myapp/models.py (repo: AminaJaved/FYP) =====
from django.db import models
from django.contrib.auth.models import User
class Snippet(models.Model):
name = models.CharField(max_length=100)
body = models.TextField()
def __str__(self):
return self.name
class Cancer(models.Model):
NPFFR1 = models.CharField(max_length=100)
TAS2R19 = models.CharField(max_length=100)
ADRA2C = models.CharField(max_length=100)
SOWAHA = models.CharField(max_length=100)
TAS2R60 = models.CharField(max_length=100)
KRT20 = models.CharField(max_length=100)
AADACL4 = models.CharField(max_length=100)
NACA2 = models.CharField(max_length=100)
CLDN8 = models.CharField(max_length=100)
GJC3 = models.CharField(max_length=100)
FEM1A = models.CharField(max_length=100)
DRD5 = models.CharField(max_length=100)
PABPC3 = models.CharField(max_length=100)
HIST1H4C = models.CharField(max_length=100)
MC2R = models.CharField(max_length=100)
def __str__(self):
return self.NPFFR1
class visualise(models.Model):
Cancer_Class = models.CharField(max_length=100)
image = models.FileField(null=True,blank=True)
Description = models.TextField()
Symptoms = models.TextField()
def __str__(self):
return self.Cancer_Class
STATUS = (
(0,"Draft"),
(1,"Publish")
)
class Blog(models.Model):
title = models.CharField(max_length=200, unique=True)
slug = models.SlugField(max_length=200, unique=True)
author = models.ForeignKey(User, on_delete= models.CASCADE,related_name='blog_posts')
updated_on = models.DateTimeField(auto_now= True)
content = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
status = models.IntegerField(choices=STATUS, default=0)
class Meta:
ordering = ['-created_on']
def __str__(self):
        return self.title
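# --- Illustrative sketch (not part of the original file) ---
# Hedged ORM usage for the models above; this only works inside a configured
# Django project (e.g. via `python manage.py shell`), not stand-alone:
#
#     published = Blog.objects.filter(status=1)   # 1 == "Publish" in STATUS
#     newest = published.first()                  # newest first, per Meta.ordering
#     classes = visualise.objects.values_list('Cancer_Class', flat=True)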
"[email protected]"
] | |
27c6f04530538b5ac8c71908ab91361f20ecc16b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4013/codes/1671_1079.py | c3bb6e4c00135c7fac261439e8a41a85fc6fb9ce | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # Ao testar sua solução, não se limite ao caso de exemplo.
from math import *
# Read the triangle side lengths a, b and c
a = float(input ("Lado 1: "))
b = float(input ("Lado 2: "))
c = float(input ("Lado 3: "))
print("Entradas:", a, ",", b, ",", c)
# All three side lengths must be positive
if (a > 0) and (b > 0) and (c > 0):
    # Check that the measurements satisfy the triangle inequality
if ((a < b + c) and (b < a + c) and (c < a + b)):
s = (a + b + c) / 2.0
area = sqrt(s * (s-a) * (s-b) * (s-c))
area = round(area, 3)
print("Area:", area)
else:
print("Area: invalida")
else:
print("Area: invalida")
# ===== file: accounts/migrations/0001_initial.py (repo: kanikamital0606/Email-Verification-in-Django-) =====
# Generated by Django 3.2.6 on 2021-08-26 16:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('auth_token', models.CharField(max_length=100)),
('is_verified', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
b2f8acc6d23fcaa0a04225dff3672850cb63c7b4 | 0ebda7376eaf61a883094957d917b5f5ad945ce1 | /_deprecated/commands/main/AboutCommand.py | 81ccd3c1b9477dc7c59787634ba069c8f0e2aa6c | [
"Apache-2.0"
] | permissive | nogipx/morris | 6bcdb7f5320bd94075b02907c553477a4b3d3280 | 955688c6c0b0cae6c3d11ea49b17004477d4fd9b | refs/heads/master | 2022-01-05T17:28:38.810492 | 2019-05-12T18:45:33 | 2019-05-12T18:45:33 | 114,779,205 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | from commands.core import Command
class AboutCommand(Command):
def __init__(self):
super().__init__()
self.triggers = ['about', 'About']
def proceed(self, member, message, attachments, group, **kwargs):
        # NOTE: the original line referenced an undefined name `args` and a
        # non-existent attribute `self._triggers`; assuming here that the
        # trigger word is the first whitespace token of `message`
        # (hypothetical fix for this deprecated module).
        args = message.split() if message else []
        if len(args) > 0 and args[0] in self.triggers:
about = """
(C) Morris Bot, 2018
Morris was originally imagined as an ordinary ~100-200 line script that could do nothing but forward messages from the administrators.
My gut, however, decided that was not enough, and so you see what you see...
You can view and rate it (that part is mandatory :D) on my Github account:
- https://github.com/nogip/morris
And you can reach me via:
- Vk: vk.com/nogip
- Instagram: instagram.com/balzph
Developed by Mamatkazin Karim, Sochi
"""
return about
return True
# ===== file: dilipadsite/dilipadsite/views.py (repo: twhyte/lipad) =====
import unicodecsv as csv
from dilipadsite.models import basehansard
from django.http import StreamingHttpResponse, HttpResponse
from django.utils.encoding import smart_str
import codecs
class Echo(object):
"""An object that implements just the write method of the file-like
interface.
"""
def write(self, value):
"""Write the value by returning it, instead of storing in a buffer."""
return value
def get_model_fields(model):
return model._meta.fields
##def streaming_csv_old(request, qs, filename):
## '''This doesn't actually stream--but it works. Pre-generates the .csv
## Keeping this method around as a backup'''
## opts = qs.model._meta
## model = qs.model
## response = HttpResponse(content_type='text/csv')
## # force download.
## response['Content-Disposition'] = ('attachment;filename=%s.csv' % filename)
## # the csv writer
## writer = csv.writer(response)
## field_names = [field.name for field in opts.fields]
## # Write a first row with header information
## writer.writerow(field_names)
## # Write data rows
## for obj in qs:
## writer.writerow([getattr(obj, field) for field in field_names])
## return response
def stream_response_generator(qs):
"""Streaming function to return data iteratively """
opts = qs.model._meta
model = qs.model
field_names = [field.name for field in opts.fields]
yield (field_names)
for obj in qs:
yield([getattr(obj, field) for field in field_names])
def streaming_csv(request, qs, filename):
pseudo_buffer = Echo()
writer = csv.writer(pseudo_buffer)
response = StreamingHttpResponse((writer.writerow(row) for row in stream_response_generator(qs)),
content_type="text/csv")
response['Content-Disposition'] = ('attachment;filename=%s.csv' % filename)
return response
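# --- Illustrative sketch (not part of the original file) ---
# A hedged example of wiring streaming_csv() into an actual view; the view
# name and queryset below are assumptions for illustration.
def export_hansard(request):
    """Stream the full basehansard table as hansard_export.csv."""
    qs = basehansard.objects.all()
    return streaming_csv(request, qs, 'hansard_export')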
# ===== file: Class.py (repo: UofTPsychProg-fall-2019/project-1-AnnZhang1997) =====
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 16 19:03:44 2020
@author: Ann Zhang
This module contains the code for class MTMSubjectMice and its functions.
"""
import pandas as pd
class MTMSubjectMouse:
"""A mouse that is a subject in the multiple time memory experiment.
=== Attributes ===
SubjectNum:
The subject number of the mouse, which is a integer in the range 1-48.
CPPchambers:
The paired and unpaired chambers for MTMSubjectMouse during CPP.
CPAchambers:
The paired and unpaired chambers for MTMSubjectMouse during CPA.
TrainTime:
The time of day that mouse receives CPP and CPA training.
Group:
The stimulus the mouse is tested for, can be either "CPP" or "CPA".
Whether the time of test matches the time of training, can be either "ON" or "OFF".
habituation:
A dictionary recording the total time the subject mouse spent in
the two chambers: "paired" and "unpaired" during habituation.
test:
A dictionary recording the total time the subject mouse spent in
the two chambers: "paired" and "unpaired" during testing.
"""
SubjectNum: int
CPPchambers: dict
CPAchambers: dict
TrainTime: dict
Group: list
TestTime: int
habituation: dict
test: dict
def __init__(self, num: int) -> None:
"""Initialize a MTMSubjectMouse with subject number "num".
"""
self.SubjectNum = num
self.CPPchambers = {"paired":"", "unpaired":""}
self.CPAchambers = {"paired":"", "unpaired":""}
self.TrainTime = {"CPP": 0, "CPA": 0}
self.Group = ["", ""]
self.TestTime = 0
self.habituation = {"paired": 0, "unpaired": 0}
self.test = {"paired": 0, "unpaired": 0}
def __repr__(self) -> str:
return "Mouse {0} in {1} Test-{2} group.".format(self.SubjectNum,
self.Group[0],
self.Group[1])
def __str__(self) -> str:
return "Mouse {0} in {1} Test-{2} group.\n \
CPP/CPA Train Time: {3}/{4}.\n \
CPP/CPA paired chamber: {5}/{6} \n \
Test time: {7}".format(self.SubjectNum,
self.Group[0], self.Group[1], self.TrainTime["CPP"],
self.TrainTime["CPA"],self.CPPchambers["paired"],
self.CPAchambers["paired"], self.TestTime)
def setchambers(self, stimulus: str, paired: str, unpaired: str) -> None:
"""
Update the paired and unpaired chambers for stimulus when stimulus is
a valid stimulus.
"""
        if stimulus.lower() == "cpp":
            self.CPPchambers["paired"] = paired
            self.CPPchambers["unpaired"] = unpaired
        elif stimulus.lower() == "cpa":
            self.CPAchambers["paired"] = paired
            self.CPAchambers["unpaired"] = unpaired
        else:
            print("This is not a valid stimulus!")
    def settraintime(self, stimulus: str, time: int) -> None:
"""
Update the training time for stimulus if stimulus is a valid stimulus.
"""
        if stimulus.lower() == "cpp":
            self.TrainTime["CPP"] = time
        elif stimulus.lower() == "cpa":
            self.TrainTime["CPA"] = time
        else:
            print("This is not a valid stimulus!")
def setgroup(self, stimulus: str, teston: str) -> None:
"""
Update the group of mouse, based on which stimulus it will be tested
for and if testing time matches training time.
"""
if stimulus == "CPP" or stimulus == "CPA":
self.Group = [stimulus, teston]
else:
print ("This is not a valid stimulus!")
def isteston(self) -> bool:
"""
        Returns whether the mouse is in a TEST ON (test time matches training
time) group or a TEST OFF (test time does not match training time)
group.
"""
if self.Group[-1] == "ON":
return True
elif self.Group[-1] == "OFF":
return FALSE
elif self.Group[-1] == "":
print ("Test Group has not been set.")
else:
print ("The test Group" + self.Group[-1] + "is not valid.")
def settesttime(self) -> None:
"""
Update the test time for mouse, based on its group and training time.
"""
if self.Group[0] == "CPP":
            if self.isteston():
self.TestTime = self.TrainTime["CPP"]
else:
self.TestTime = self.TrainTime["CPA"]
else:
            if self.isteston():
self.TestTime = self.TrainTime["CPA"]
else:
self.TestTime = self.TrainTime["CPP"]
def recorddwelltime(self, section: str, paired: int, unpaired: int) -> None:
"""Records the dwell time.
section can be either "habituation" or "test".
paired is the number of seconds the subject spent in the paired chamber.
unpaired is the number of seconds the subject spent in the unpaired chamber.
"""
if section.lower() == "habituation":
self.habituation["paired"] = paired
self.habituation["unpaired"] = unpaired
elif section.lower() == "test":
self.test["paired"] = paired
self.test["unpaired"] = unpaired
else:
raise Exception("Not a valid section!")
def outputdf(self, slot: str) -> pd.DataFrame:
"""Output a 1 x 9 dataframe.
The columns are: slot, subjectnum, group, CPP, CPA, hab-paired,
hab-unpaired, test-paired, test-unpaired.
Slot records which slot in the stack was the mouse housed in.
Subjectnum records the subject number of mouse.
Group records the stimulus and test-training time match.
CPP records the CPP-paired chamber and CPP-training time.
CPA records the CPA-paired chamber and CPP-training time.
hab-paired and hab-unpaired records the number of seconds that mouse
spent in the test-stimulus-paired and -unpaired chambers during habituation.
test-paired and test-unpaired records the number of seconds that mouse
spent in the test-stimulus-paired and -unpaired chambers during test.
"""
group = self.Group[0] + " Test-" + self.Group[1]
cpp = self.CPPchambers["paired"] + " at " + str(self.TrainTime["CPP"])
cpa = self.CPAchambers["paired"] + " at " + str(self.TrainTime["CPA"])
d = {"slot": [slot], "subjectnum": [self.SubjectNum], "group": [group],
"CPP": [cpp], "CPA": [cpa], "hab-paired": [self.habituation["paired"]],
"hab-unpaired": [self.habituation["unpaired"]], "test-paired":
[self.test["paired"]], "test-unpaired": [self.test["unpaired"]]}
df = pd.DataFrame(data = d)
return df
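# --- Illustrative usage (not part of the original file) ---
# A hedged end-to-end sketch for one subject; chamber names, times of day
# and dwell times below are made up for illustration.
if __name__ == "__main__":
    mouse = MTMSubjectMouse(1)
    mouse.setchambers("CPP", paired="A", unpaired="B")
    mouse.setchambers("CPA", paired="C", unpaired="D")
    mouse.settraintime("CPP", 9)
    mouse.settraintime("CPA", 17)
    mouse.setgroup("CPP", "ON")
    mouse.settesttime()
    mouse.recorddwelltime("habituation", paired=300, unpaired=290)
    mouse.recorddwelltime("test", paired=420, unpaired=180)
    print(mouse.outputdf(slot="1A"))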
# ===== file: interptools.py (repo: moflaher/workspace_python) =====
from __future__ import division,print_function
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.tri as mplt
import matplotlib.pyplot as plt
import os, sys
import scipy.io as sio
import gridtools as gt
import datatools as dt
import plottools as pt
import projtools as pjt
import misctools as mt
from matplotlib.collections import LineCollection as LC
import seawater as sw
np.set_printoptions(precision=16,suppress=True,threshold=sys.maxsize)
import bisect
import scipy.interpolate as spitp
import matplotlib.path as path
"""
Front Matter
=============
Created in 2014
Author: Mitchell O'Flaherty-Sproul
A bunch of functions dealing with fvcom interpolation.
"""
def interpE_at_loc(data,varname,loc,layer=None,ll=True):
"""
Interpolate element data at a location. If variable is 3d then specify a layer, defaults to surface layer otherwise.
Note: 1d element data will break this, should be possible to handle. I will work out the logic another day.
:Parameters:
data - data dictionary from loadnc
varname - element data variable name. (2d or 3d)
loc - location
:Optional:
layer - default None. Specify which layer of 3d data to use
ll - default True. Is point lon/lat or xy.
"""
###############################################################################
# Error and corner case checking
if ll==True:
trifinder='trigrid_finder'
trigrid='trigrid'
else:
trifinder='trigridxy_finder'
trigrid='trigridxy'
if (data.has_key(trifinder)==False and data.has_key(trigrid)):
print('No trifinder initialized. Initializing now.')
data[trifinder]=data[trigrid].get_trifinder()
elif data.has_key(trigrid)==False:
print('No trifinder or trigrid to initialize it.')
return
if ((len(data[varname].shape)>2) and (layer==None)):
print('3d variable specified without layer. Returning surface layer.')
layer=0
elif ((len(data[varname].shape)==2) and (layer!=None)):
print('2d variable specified with layer. That would break things, unspecifing layer.')
layer=None
loc=np.array(loc)
host=data[trifinder].__call__(loc[0],loc[1])
if host==-1:
print('Point at: (' + ('%f'%loc[0]) + ', ' +('%f'%loc[1]) + ') is external to the grid.')
out=np.empty(shape=(data[varname][:,layer,host]).squeeze().shape)
out[:]=np.nan
return out
###############################################################################
#code for ll adapted from mod_utils.F
if ll==True:
x0c,y0c=pjt.ll2m(data['uvnodell'][host,:],loc)
else:
x0c=loc[0]-data['uvnode'][host,0]
y0c=loc[1]-data['uvnode'][host,1]
e0=data['nbe'][host,0]
e1=data['nbe'][host,1]
e2=data['nbe'][host,2]
var_e=(data[varname][:,layer,host]).squeeze()
if e0==-1:
var_0=np.zeros(shape=var_e.shape,dtype=var_e.dtype)
else:
var_0=(data[varname][:,layer,e0]).squeeze()
if e1==-1:
var_1=np.zeros(shape=var_e.shape,dtype=var_e.dtype)
else:
var_1=(data[varname][:,layer,e1]).squeeze()
if e2==-1:
var_2=np.zeros(shape=var_e.shape,dtype=var_e.dtype)
else:
var_2=(data[varname][:,layer,e2]).squeeze()
dvardx= data['a1u'][0,host]*var_e+data['a1u'][1,host]*var_0+data['a1u'][2,host]*var_1+data['a1u'][3,host]*var_2
dvardy= data['a2u'][0,host]*var_e+data['a2u'][1,host]*var_0+data['a2u'][2,host]*var_1+data['a2u'][3,host]*var_2
var= var_e + dvardx*x0c + dvardy*y0c
return var
def interpN_at_loc(data,varname,loc,layer=None,ll=True):
"""
Interpolate nodal data at a location. If variable is 3d then specify a layer, defaults to surface layer otherwise.
Note: 1d element data will break this, should be possible to handle. I will work out the logic another day.
data - data dictionary from loadnc
varname - nodal data variable name. (1d or 2d or 3d)
loc - location
Optional:
layer - default None. Specify which layer of 3d data to use
ll - default True. Is point lon/lat or xy.
"""
###############################################################################
# Error and corner case checking
if ll==True:
trifinder='trigrid_finder'
trigrid='trigrid'
else:
trifinder='trigridxy_finder'
trigrid='trigridxy'
if (data.has_key(trifinder)==False and data.has_key(trigrid)):
print('No trifinder initialized. Initializing now.')
data[trifinder]=data[trigrid].get_trifinder()
elif data.has_key(trigrid)==False:
print('No trifinder or trigrid to initialize it.')
return
if ((len(data[varname].shape)>2) and (layer==None)):
print('3d variable specified without layer. Returning surface layer.')
layer=0
elif ((len(data[varname].shape)==2) and (layer!=None)):
print('2d variable specified with layer. That would break things, unspecifing layer.')
layer=None
loc=np.array(loc)
host=data[trifinder].__call__(loc[0],loc[1])
if host==-1:
print('Point at: (' + ('%f'%loc[0]) + ', ' +('%f'%loc[1]) + ') is external to the grid.')
if len(data[varname].shape)==1:
out=np.nan
else:
out=np.empty(shape=(data[varname][:,layer,host]).squeeze().shape)
out[:]=np.nan
return out
###############################################################################
#code for ll adapted from mod_utils.F
if ll==True:
x0c,y0c=pjt.ll2m(data['uvnodell'][host,:],loc)
else:
x0c=loc[0]-data['uvnode'][host,0]
y0c=loc[1]-data['uvnode'][host,1]
n0=data['nv'][host,0]
n1=data['nv'][host,1]
n2=data['nv'][host,2]
#To deal with 1d data, should be a better way to handle this....
#This can all be vectorized, checkout robies code could make a factor of 2 difference.
if len(data[varname].shape)==1:
nvar0=data[varname][n0]
nvar1=data[varname][n1]
nvar2=data[varname][n2]
else:
nvar0=(data[varname][:,layer,n0]).squeeze()
nvar1=(data[varname][:,layer,n1]).squeeze()
nvar2=(data[varname][:,layer,n2]).squeeze()
var_0=data['aw0'][0,host]*nvar0+data['aw0'][1,host]*nvar1+data['aw0'][2,host]*nvar2
var_x=data['awx'][0,host]*nvar0+data['awx'][1,host]*nvar1+data['awx'][2,host]*nvar2
var_y=data['awy'][0,host]*nvar0+data['awy'][1,host]*nvar1+data['awy'][2,host]*nvar2
var= var_0 + var_x*x0c + var_y*y0c
return var
def interpEfield_locs(data,varname,locs,timein,layer=None,ll=False,fill_value=-9999,hosts=[]):
#"""
#Interpolate element data at a location. If variable is 3d then specify a layer, defaults to surface layer otherwise.
#Note: 1d element data will break this, should be possible to handle. I will work out the logic another day.
#:Parameters:
#data - data dictionary from loadnc
#varname - element data variable name. (2d or 3d)
#loc - location
#:Optional:
#layer - default None. Specify which layer of 3d data to use
#ll - default True. Is point lon/lat or xy.
#fill_value - default -9999 when points are outside the domain they return fill_value
#"""
###############################################################################
# Error and corner case checking
if ll==True:
trifinder='trigrid_finder'
trigrid='trigrid'
else:
trifinder='trigridxy_finder'
trigrid='trigridxy'
if (data.has_key(trifinder)==False and data.has_key(trigrid)):
print('No trifinder initialized. Initializing now.')
data[trifinder]=data[trigrid].get_trifinder()
elif data.has_key(trigrid)==False:
print('No trifinder or trigrid to initialize it.')
return
if ((len(data[varname].shape)>2) and (layer==None)):
print('3d variable specified without layer. Returning surface layer.')
layer=0
elif ((len(data[varname].shape)==2) and (layer!=None)):
print('2d variable specified with layer. That would break things, unspecifing layer.')
layer=None
locs=np.atleast_2d(locs)
#Only find the hosts if not given
if hosts==[]:
hosts=data[trifinder].__call__(locs[:,0],locs[:,1])
#if host==-1:
#print('Point at: (' + ('%f'%loc[0]) + ', ' +('%f'%loc[1]) + ') is external to the grid.'
#out=np.empty(shape=(data[varname][timein,layer,host]).squeeze().shape)
#out[:]=np.nan
#return out
###############################################################################
#code for ll adapted from mod_utils.F
if ll==True:
x0c,y0c=pjt.ll2m(data['uvnodell'][hosts,:].flatten(),locs.flatten())
else:
x0c=locs[:,0]-data['uvnode'][hosts,0]
y0c=locs[:,1]-data['uvnode'][hosts,1]
e0=data['nbe'][hosts,0]
e1=data['nbe'][hosts,1]
e2=data['nbe'][hosts,2]
var_e=(data[varname][timein,layer,hosts]).flatten()
var_0=(data[varname][timein,layer,e0]).flatten()
var_1=(data[varname][timein,layer,e1]).flatten()
var_2=(data[varname][timein,layer,e2]).flatten()
var_0[e0==-1]=0
var_1[e1==-1]=0
var_2[e2==-1]=0
dvardx= data['a1u'][0,hosts]*var_e+data['a1u'][1,hosts]*var_0+data['a1u'][2,hosts]*var_1+data['a1u'][3,hosts]*var_2
dvardy= data['a2u'][0,hosts]*var_e+data['a2u'][1,hosts]*var_0+data['a2u'][2,hosts]*var_1+data['a2u'][3,hosts]*var_2
var= var_e + dvardx*x0c + dvardy*y0c
# Handle any points outside the domain
var[hosts==-1]=fill_value
return var
def interpNfield_locs(data,varname,locs,timein,ll=False,fill_value=-9999,hosts=[]):
#"""
#Interpolate node data at a location.
#
#:Parameters:
#data - data dictionary from loadnc
#varname - element data variable name.
#loc - location
#
#:Optional:
#ll - default True. Is point lon/lat or xy.
#fill_value - default -9999 when points are outside the domain they return fill_value
#"""
###############################################################################
# Error and corner case checking
if ll==True:
trifinder='trigrid_finder'
trigrid='trigrid'
else:
trifinder='trigridxy_finder'
trigrid='trigridxy'
if (data.has_key(trifinder)==False and data.has_key(trigrid)):
print('No trifinder initialized. Initializing now.')
data[trifinder]=data[trigrid].get_trifinder()
elif data.has_key(trigrid)==False:
print('No trifinder or trigrid to initialize it.')
return
locs=np.atleast_2d(locs)
#Only find the hosts if not given
if hosts==[]:
hosts=data[trifinder].__call__(locs[:,0],locs[:,1])
#if host==-1:
#print('Point at: (' + ('%f'%loc[0]) + ', ' +('%f'%loc[1]) + ') is external to the grid.'
#out=np.empty(shape=(data[varname][timein,layer,host]).squeeze().shape)
#out[:]=np.nan
#return out
###############################################################################
#code for ll adapted from mod_utils.F
if ll==True:
x0c,y0c=pjt.ll2m(data['uvnodell'][hosts,:].flatten(),locs.flatten())
else:
x0c=locs[:,0]-data['uvnode'][hosts,0]
y0c=locs[:,1]-data['uvnode'][hosts,1]
n0=data['nv'][hosts,0]
n1=data['nv'][hosts,1]
n2=data['nv'][hosts,2]
#To deal with 1d data, should be a better way to handle this....
#This can all be vectorized, checkout robies code could make a factor of 2 difference.
if len(data[varname].shape)==1:
nvar0=data[varname][n0]
nvar1=data[varname][n1]
nvar2=data[varname][n2]
else:
nvar0=(data[varname][timein,n0]).squeeze()
nvar1=(data[varname][timein,n1]).squeeze()
nvar2=(data[varname][timein,n2]).squeeze()
var_0=data['aw0'][0,hosts]*nvar0+data['aw0'][1,hosts]*nvar1+data['aw0'][2,hosts]*nvar2
var_x=data['awx'][0,hosts]*nvar0+data['awx'][1,hosts]*nvar1+data['awx'][2,hosts]*nvar2
var_y=data['awy'][0,hosts]*nvar0+data['awy'][1,hosts]*nvar1+data['awy'][2,hosts]*nvar2
var= var_0 + var_x*x0c + var_y*y0c
# Handle any points outside the domain
var[hosts==-1]=fill_value
return var
def cross_shore_transect_2d(grid,name,region,vec,npt):
data = dt.loadnc('runs/'+grid+'/'+name+'/output/',singlename=grid + '_0001.nc')
print('done load')
data = dt.ncdatasort(data,trifinder=True)
print('done sort')
cages=gt.loadcage('runs/'+grid+'/' +name+ '/input/' +grid+ '_cage.dat')
if np.shape(cages)!=():
tmparray=[list(zip(data['nodell'][data['nv'][i,[0,1,2,0]],0],data['nodell'][data['nv'][i,[0,1,2,0]],1])) for i in cages ]
color='g'
lw=.2
ls='solid'
vectorstart=np.array(vec[0])
vectorend=np.array(vec[1])
vectorx=np.array([vectorstart[0],vectorend[0]])
vectory=np.array([vectorstart[1],vectorend[1]])
snv=(vectorend-vectorstart)/np.linalg.norm(vectorend-vectorstart)
xi=np.linspace(vectorstart[0],vectorend[0],npt)
yi=np.linspace(vectorstart[1],vectorend[1],npt)
us=data['ua'].shape
savepath='data/cross_shore_transect/'
if not os.path.exists(savepath): os.makedirs(savepath)
plotpath='figures/png/'+grid+'_2d/cross_shore_transect/'
if not os.path.exists(plotpath): os.makedirs(plotpath)
nidx=dt.get_nodes(data,region)
f=plt.figure()
ax=f.add_axes([.125,.1,.775,.8])
triax=ax.tripcolor(data['trigrid'],data['h'],vmin=data['h'][nidx].min(),vmax=data['h'][nidx].max())
ax.plot(xi,yi,'k',lw=3)
if np.shape(cages)!=():
lseg_t=LC(tmparray,linewidths = lw,linestyles=ls,color=color)
coast=ax.add_collection(lseg_t)
coast.set_zorder(30)
pt.prettyplot_ll(ax,setregion=region,cb=triax,cblabel=r'Depth (m)')
f.savefig(plotpath + name+'_'+('%f'%vectorx[0])+'_'+('%f'%vectorx[1])+'_'+('%f'%vectory[0])+'_'+('%f'%vectory[1])+'_'+('%d'%len(xi))+'_line_location.png',dpi=600)
plt.close(f)
fillarray_u=np.empty((us[0],npt))
fillarray_v=np.empty((us[0],npt))
fillalong=np.empty((us[0],npt))
fillcross=np.empty((us[0],npt))
dist=np.empty((npt,))
h=np.empty((npt,))
print('interp uvw on path')
for i in range(0,len(xi)):
print(i)
fillarray_u[:,i]=interpE_at_loc(data,'ua',[xi[i],yi[i]])
fillarray_v[:,i]=interpE_at_loc(data,'va',[xi[i],yi[i]])
h[i]=interpN_at_loc(data,'h',[xi[i],yi[i]])
print('Calc along path current')
for i in range(0,len(xi)):
print(i)
inner=np.inner(np.vstack([fillarray_u[:,i],fillarray_v[:,i]]).T,snv)
along=np.vstack([inner*snv[0],inner*snv[1]]).T
tmpa=np.multiply(np.sign(np.arctan2(along[:,1],along[:,0])),np.linalg.norm(along,axis=1))
fillalong[:,i]=tmpa
cross=np.vstack([fillarray_u[:,i],fillarray_v[:,i]]).T-along
tmpc=np.multiply(np.sign(np.arctan2(cross[:,1],cross[:,0])),np.linalg.norm(cross,axis=1))
fillcross[:,i]=tmpc
dist[i]=(sw.dist([vectorstart[1], yi[i]],[vectorstart[0], xi[i]],'km'))[0]*1000;
if np.shape(cages)!=():
incage=np.zeros((len(xi),))
host=data['trigrid'].get_trifinder().__call__(xi,yi)
incage[np.in1d(host,cages)]=1
savedic={}
savedic['u']=fillarray_u
savedic['v']=fillarray_v
savedic['along']=fillalong
savedic['cross']=fillcross
savedic['distance']=dist
savedic['h']=h
savedic['lon']=xi
savedic['lat']=yi
if np.shape(cages)!=():
savedic['incage']=incage
np.save(savepath+grid+'_'+name+'_'+('%f'%vectorx[0])+'_'+('%f'%vectorx[1])+'_'+('%f'%vectory[0])+'_'+('%f'%vectory[1])+'_'+('%d'%len(xi))+'_2d.npy',savedic)
sio.savemat(savepath+'matfiles/'+grid+'_'+name+'_'+('%f'%vectorx[0])+'_'+('%f'%vectorx[1])+'_'+('%f'%vectory[0])+'_'+('%f'%vectory[1])+'_'+('%d'%len(xi))+'_2d.mat',mdict=savedic)
def interpol(data_1, data_2, time_step=5.0/(24*60)):
dt_1 = data_1['time']
dt_2 = data_2['time']
    # generate interpolation functions using linear interpolation
    # (scipy's interp1d -- the local interp1d wrapper defined below
    # has a different signature)
    f1 = spitp.interp1d(dt_1, data_1['pts'])
    f2 = spitp.interp1d(dt_2, data_2['pts'])
# choose interval on which to interpolate
start = max(dt_1[0], dt_2[0])
end = min(dt_1[-1], dt_2[-1])
# create timestamp array for new data and perform interpolation
output_times = np.arange(start,end,time_step)
series_1 = f1(output_times)
series_2 = f2(output_times)
dt_start = max(dt_1[0], dt_2[0])
return (series_1, series_2, output_times, time_step)
def interp1d(in_time, in_data, out_time, kind='linear'):
"""
    Takes 1d data and its timestamps, and linearly interpolates the data onto a second set of timestamps.
:Parameters:
in_data - data to interpolate
in_time - timestamp of in_data
out_time - timestamps to output
:Optional:
kind - sets the linear interpolator kind used in scipy.interpolate.interp1d
"""
# generate interpolation functions using linear interpolation
f = spitp.interp1d(in_time, in_data, kind=kind, bounds_error=False)
# Create output data
out_data = f(out_time)
return out_data
def get_riops_weights(ri,locations):
"""
Function to calculate interpolation weights for riops to points.
"""
print('Processing weights')
lon=ri['nav_lon'][:]-360
lat=ri['nav_lat'][:]
lo,la,proj=pjt.lcc(lon,lat)
ll=np.array(proj(locations[:,0],locations[:,1])).T
bll=mt.boxminmax(ll)
idx=np.empty((len(locations),2),dtype=int)
weights=np.empty((len(locations[:,0]),4))
for i in range(ri['nav_lon'].shape[0]-1):
for j in range(ri['nav_lon'].shape[1]-1):
a=np.array([lo[i,j],lo[i,j+1],lo[i+1,j+1],lo[i+1,j]])
b=np.array([la[i,j],la[i,j+1],la[i+1,j+1],la[i+1,j]])
if b.max()<np.min(bll[2:]) or b.min()>np.max(bll[2:]):
continue
if a.min()>np.max(bll[:2]) or a.max()<np.min(bll[:2]):
continue
p=path.Path(np.vstack([a,b]).T)
tidx=p.contains_points(ll)
if np.sum(tidx)>0:
for k in range(len(tidx)):
if tidx[k]:
idx[k,]=np.array([i,j])
for k,tt in enumerate(idx):
i=tt[0]
j=tt[1]
a=np.array([lo[i,j],lo[i,j+1],lo[i+1,j+1],lo[i+1,j]])
b=np.array([la[i,j],la[i,j+1],la[i+1,j+1],la[i+1,j]])
dist=np.sqrt((a-ll[k,0])**2+(b-ll[k,1])**2)
weights[k,:]=(dist**2)*np.sum(1/dist**2)
print('Done processing weights')
return weights, idx
def interp_riops(field, weights, idx):
"""
Interpolate riops using weights.
"""
try:
import pyximport; pyximport.install()
import interp_riops as ir
out=ir.interp_riops_c(field,weights,idx)
return out
except:
        print('There was an issue using Cython; falling back to Python.')
out=np.empty((len(idx),))
for k,tt in enumerate(idx):
i=tt[0]
j=tt[1]
vals=np.array([field[i,j],field[i,j+1],field[i+1,j+1],field[i+1,j]])
out[k]=np.nansum(vals/weights[k,:])
return out
def spread_field(fieldin):
"""
Spread a gridded field down and then out.
"""
fs=np.array(fieldin.shape)
if len(fs)==3:
field=fieldin[0,].reshape(-1)
else:
field=fieldin.reshape(-1)
try:
import pyximport; pyximport.install()
import interp_riops as ir
field=ir.spread_field_c(field, fs[1], fs[2])
except:
        print('There was an issue using Cython; falling back to Python.')
while np.sum(field.mask)>0:
for i in range(1,fs[1]-1):
for j in range(1,fs[2]-1):
if field.mask[i*fs[2]+j]:
idx=np.array([(i-1)*fs[2]+(j-1),(i-1)*fs[2]+(j),(i-1)*fs[2]+(j+1),
(i)*fs[2]+(j-1),(i)*fs[2]+(j+1),
(i+1)*fs[2]+(j-1),(i+1)*fs[2]+(j),(i+1)*fs[2]+(j+1)])
if np.sum(~field.mask[idx])>0:
ridx=idx[~field.mask[idx]]
pmean=field[ridx]
field[i*fs[2]+j]=np.mean(pmean)
i=0
for j in range(0,fs[2]):
if field.mask[i*fs[2]+j] and not field.mask[(i+1)*fs[2]+j]:
field[i*fs[2]+j]=field[(i+1)*fs[2]+j]
i=fs[1]-1
for j in range(0,fs[2]):
if field.mask[i*fs[2]+j] and not field.mask[(i-1)*fs[2]+j]:
field[i*fs[2]+j]=field[(i-1)*fs[2]+j]
j=0
for i in range(0,fs[1]):
if field.mask[i*fs[2]+j] and not field.mask[i*fs[2]+(j+1)]:
field[i*fs[2]+j]=field[i*fs[2]+(j+1)]
j=fs[2]-1
for i in range(0,fs[1]):
if field.mask[i*fs[2]+j] and not field.mask[i*fs[2]+(j-1)]:
field[i*fs[2]+j]=field[i*fs[2]+(j-1)]
if len(fs)==3:
fieldin[0,:]=field.reshape(fs[1],fs[2])
for i in range(1,fieldin.shape[0]):
fieldin[i,fieldin.mask[i,]]=fieldin[i-1,fieldin.mask[i,]]
else:
fieldin=field.reshape(fs)
return fieldin
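# Note (added explanation): the pure-Python fallback above flood-fills masked
# interior cells with the mean of their unmasked 8-neighbours until none
# remain, then copies values inward along the four grid edges; for 3-D fields
# each deeper level inherits the level above wherever it is still masked.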
| [
"[email protected]"
] | |
ecaba36b4b380ae04e3c98b69fa92b4a3c677435 | 0964a05c266d52c2840fe52a4a550c1e88d03b22 | /scanner.py | e08ff775182f91a43ce9a846e3ee2cb1b4b8c57f | [] | no_license | ngriskauskas/CS4280 | a0d5244e129589d97fa9a9924a41629ed0fc592c | ca5898f3877edb2ca18f61d5a80c9501de49a069 | refs/heads/main | 2023-02-02T20:43:51.505489 | 2020-12-14T06:13:03 | 2020-12-14T06:13:03 | 321,250,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | from enum import Enum
from filter import Filter
keywords = ["start", "stop", "iter", "void", "int", "exit",
"scanf", "printf", "main", "if", "then", "let", "data", "func"]
operators = ["=", "=>", "=<", "==", ":", "+", "-", "*",
"/", "%", ".", "(", ")", ",", "{", "}", ";", "[", "]"]
class TokenID(Enum):
IDENT_tk = 1
NUM_tk = 2
KW_tk = 3
OP_tk = 4
EOF_tk = 5
class Token:
def __init__(self, id, instance, line):
self.id = id
self.instance = instance
self.line = line
class Scanner:
def __init__(self, fileName):
self.Filter = Filter(fileName)
def GetNextToken(self):
(word, line) = self.Filter.GetNextString()
if word == "EOF":
return Token(TokenID.EOF_tk, word, line)
if word in keywords:
return Token(TokenID.KW_tk, word, line)
elif word in operators:
return Token(TokenID.OP_tk, word, line)
elif word.isnumeric():
return Token(TokenID.NUM_tk, word[0: 7], line)
elif word[0].islower() and word.isalnum():
return Token(TokenID.IDENT_tk, word[0: 7], line)
else:
raise Exception("Scanner Failed to read", word, line)
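# Illustrative usage sketch (added; "program.txt" is a hypothetical input file):
#
#   scanner = Scanner("program.txt")
#   token = scanner.GetNextToken()
#   while token.id != TokenID.EOF_tk:
#       print(token.id, token.instance, token.line)
#       token = scanner.GetNextToken()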
| [
"[email protected]"
] | |
dd372fa5126667291f62ebe68819df5c4383239d | 982a904a83e2caa7acd8b2ac19cfc5a4fb75bde1 | /examples/ch12/snippets_py/12_02.12selfcheck.py | 38e7400581c8da0307e75b56462cbcd607ef5a28 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | pdeitel/IntroToPython | 73bc349fe40701b51f49d17d7fbc5b9985885e48 | 978093febf2ed849a2049e0b0860d2c4998306f7 | refs/heads/master | 2023-02-09T08:04:15.313698 | 2023-02-03T23:23:42 | 2023-02-03T23:23:42 | 173,331,130 | 249 | 371 | null | 2022-12-04T06:52:26 | 2019-03-01T16:08:37 | null | UTF-8 | Python | false | false | 1,183 | py | # Section 12.2.12 Self Check snippets
# Exercise 2
from textblob import Word
word = Word('boat')
word.synsets
word.definitions
##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
| [
"[email protected]"
] | |
a629ff545360e6bd157e394d377cbc1f1330141e | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_6/mtttaf002/question1.py | 9e72945c0d743ddcf7d64cd2596254bb5b69226b | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | """produce right aligned list of names
tafara mtutu
20 apr 2014"""
names = []
count = 0
aligned = []
sort = ""
#ask user for names
print("Enter strings (end with DONE):")
name = input()
while name.lower() != "done":
if count < len(name):
count = len(name)
names.append(name)
name = input()
#make length of equal to the length of longest string
for i in names:
sort = " "*(count-len(i)) + i
aligned.append(sort)
print()
print("Right-aligned list:")
for j in aligned:
print(j)
| [
"[email protected]"
] | |
4003548109aa6ef8ce4e423121fdf12cee5d618a | 72d789d4a392a7c5ddd15d0db9451436c24b6866 | /Naive_Bayes/simple_bayes.py | 9853039dd77c0ab259e2e782d64d2723ed7fa61a | [] | no_license | chaojimali666/cs229_learning | e78e08d3fb2e5dc8bc9ab1dcb1899ed06a2ef34b | 3250e1d3e090d6bb73ee87cd96e087821195f249 | refs/heads/master | 2020-03-24T16:10:51.448201 | 2018-09-07T10:35:39 | 2018-09-07T10:35:39 | 142,815,559 | 0 | 0 | null | 2018-09-07T10:35:40 | 2018-07-30T02:37:01 | Python | UTF-8 | Python | false | false | 5,371 | py | '''
PostingList is from http://ml.apachecn.org/mlia/naive-bayes/
Theory refers to Andrew Ng's lecture notes, part 4
'''
from numpy import *
import numpy as np  # needed: classifyNB below uses the np.* namespace
import re
def loadDataSet():
postingList=[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
['stop', 'posting', 'stupid', 'worthless', 'garbage'],
['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
classVec = [0,1,0,1,0,1] #1 is abusive, 0 not
return postingList,classVec
def get_feature_vector(postingList):
"""
get the word dict
"""
x_dict = [word for wordlist in postingList for word in wordlist]
x_dict = set(x_dict) #remove repeated element
x_dict = list(x_dict)
return x_dict
def get_document_vector(x_dict,document):
"""
get the vector with x_dict
if the word in x_dict appears in the document,the value of x is 1
else the value of x is 0
"""
x_vector = [0] * len(x_dict) #the initial value is 0
for word in document:
if word in x_dict:
x_vector[x_dict.index(word)] = 1
        else:
            print('the word %s is not in the dictionary!' % word)
return x_vector
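# Illustrative sketch (added; hypothetical values): with
#   x_dict = ['dog', 'my', 'stupid']
# the document ['my', 'dog'] maps to the binary vector [1, 1, 0]:
# 1 where the dictionary word occurs in the document, 0 otherwise.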
def train_data(x_dict,postingList,classVec):
"""
get the parameters
fi_y
fi_jy1 while the words are abusive
fi_jy0 while normal
"""
fi_y = (sum([i for i in classVec if i == 1]))/(len(classVec))
fi_jy1 = [1] * len(x_dict)
fi_jy0 = [1]*len(x_dict)
y0 = 2
y1 = 2
for document in postingList:
x_vector = get_document_vector(x_dict,document)
y = classVec[postingList.index(document)]
if y == 1:
fi_jy1 = [fi_jy1[i]+x_vector[i] for i in range(len(x_vector))]
y1 = y1 +1
elif y ==0:
fi_jy0 = [fi_jy0[i]+x_vector[i] for i in range(len(x_vector))]
y0 = y0 +1
fi_jy0 = [fi/y0 for fi in fi_jy0]
fi_jy1 = [fi/y1 for fi in fi_jy1]
return fi_y,fi_jy1,fi_jy0
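# Note (added explanation): the word counters start at 1 and the class totals
# at 2, i.e. Laplace (add-one) smoothing for this Bernoulli event model:
#   fi_j|y=1 = (1 + #{abusive docs containing word j}) / (2 + #abusive docs)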
def classifyNB(dataset,fi_y,fi_jy0,fi_jy1):
fi_jy1 = np.array(fi_jy1)
fi_jy0 = np.array(fi_jy0)
p1 = (sum(dataset*fi_jy1)*fi_y)/((sum(dataset*fi_jy1))*fi_y+(sum(dataset*fi_jy0))*(1-fi_y))
p0 = 1-p1
if p1>p0:
return 1
else:
return 0
def testingNB():
postingList,classVec = loadDataSet()
x_dict = get_feature_vector(postingList)
fi_y,fi_jy1,fi_jy0 = train_data(x_dict,postingList,classVec)
testEntry = ['love','my','dalmation']
dataset = get_document_vector(x_dict,testEntry)
dataset = array(dataset)
p = classifyNB(dataset,fi_y,fi_jy0,fi_jy1)
print(testEntry,'classify as :',p)
testEntry = ['stupid','garbage']
dataset = get_document_vector(x_dict,testEntry)
dataset = array(dataset)
p = classifyNB(dataset,fi_y,fi_jy0,fi_jy1)
print(testEntry,'classify as :',p)
testEntry = ['I','don\'t','want','to','be','a','stupid','guy']
dataset = get_document_vector(x_dict,testEntry)
dataset = array(dataset)
p = classifyNB(dataset,fi_y,fi_jy0,fi_jy1)
print(testEntry,'classify as :',p)
testEntry = ['you','are','a','dog']
dataset = get_document_vector(x_dict,testEntry)
dataset = array(dataset)
p = classifyNB(dataset,fi_y,fi_jy0,fi_jy1)
print(testEntry,'classify as :',p)
def train_data_2(x_dict,postingList,classVec):
"""
event model:calculate the frequency of the words appearing in spam(non-spam) email
"""
fi_y = (sum([i for i in classVec if i == 1]))/(len(classVec))
fi_jy1 = [1] * len(x_dict)
fi_jy0 = [1]*len(x_dict)
y0 = len(x_dict)
y1 = len(x_dict)
for email in postingList:
y = classVec[postingList.index(email)]
if y == 1:
y1 = y1 + len(email)
for word in email:
fi_jy1[x_dict.index(word)] += 1
elif y == 0:
y0 = y0 + len(email)
for word in email:
fi_jy0[x_dict.index(word)] += 1
fi_jy0 = [fi/y0 for fi in fi_jy0]
fi_jy1 = [fi/y1 for fi in fi_jy1]
return fi_y,fi_jy1,fi_jy0
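# Note (added explanation): this multinomial event model is Laplace-smoothed
# with the vocabulary size |V| (y0 and y1 start at len(x_dict)):
#   fi_j|y = (1 + count of word j in class-y emails) / (|V| + total words in class y)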
def string_parse(email_txt):
regex = re.compile('\\W+')
vocabulary_list = regex.split(email_txt)
vocabulary_list = [word.lower() for word in vocabulary_list if len(word) > 2]
return vocabulary_list
def testNB2():
postingList = []
classVec = []
#load data
for i in range(1,26):
        email_spam = string_parse(open('email/spam/%d.txt' % (i)).read())
postingList.append(email_spam)
classVec.append(1)
        email_normal = string_parse(open('email/ham/%d.txt' % (i)).read())
postingList.append(email_normal)
classVec.append(0)
x_dict = get_feature_vector(postingList)
#split data into training/test set
test_data = []
test_value = []
for i in range(10):
        index = int(random.uniform(0, len(postingList)))
test_data.append(postingList[index])
test_value.append(classVec[index])
del(postingList[index])
del(classVec[index])
fi_y,fi_jy1,fi_jy0 = train_data_2(x_dict,postingList,classVec)
res_test = [classifyNB(dataset,fi_y,fi_jy0,fi_jy1) for dataset in test_data]
prob = sum([abs(res_test[i]-test_value[i]) for i in range(len(res_test))])/len(res_test)
    print('the error rate of classification is %.2f' % (prob))
| [
"[email protected]"
] | |
e5500f8613dd97c63af38a515d3fcaed24f1edfc | ef3fe422fc5644ce37cef2e8eb47a615e0865f27 | /0x00-python_variable_annotations/100-safe_first_element.py | a68a172a7b3aeffd93fd5ece78bd0461e3d8fca2 | [] | no_license | Manuelpv17/holbertonschool-web_back_end | b1b6d993b378f60e3d2312079b49fb059a2e14a7 | c4c60bf08648a8e9c846147808b6a7fbd9a818a7 | refs/heads/main | 2023-08-27T11:10:50.496692 | 2021-10-17T16:54:21 | 2021-10-17T16:54:21 | 366,537,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | #!/usr/bin/env python3
""" 10. Duck typing - first element of a sequence """
from typing import Sequence, Union, Any
def safe_first_element(lst: Sequence[Any]) -> Union[Any, None]:
""" 10. Duck typing - first element of a sequence """
if lst:
return lst[0]
else:
return None
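# Illustrative sketch (added): safe_first_element([1, 2, 3]) -> 1, while
# safe_first_element([]) -> None, since an empty sequence is falsy.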
| [
"[email protected]"
] | |
fdc09392606dbaa4da061b3a530db0f87a8dc68c | 8771c94dce3c7e30c9e5b5f45cf8683ba9cac6fd | /leetcode/algorithms/p0338_counting_bits_1.py | 369900a44f586dcd107afb5c442e1ac2172ed57f | [] | no_license | J14032016/LeetCode-Python | f2a80ecb7822cf12a8ae1600e07e4e6667204230 | 9a8f5329d7c48dd34de3105c88afb5e03c2aace4 | refs/heads/master | 2023-03-12T02:55:45.094180 | 2021-03-07T07:55:03 | 2021-03-07T07:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from typing import List
class Solution:
def countBits(self, num: int) -> List[int]:
return [self._hammingWeight(x) for x in range(num + 1)]
def _hammingWeight(self, n: int) -> int:
count = 0
while n > 0:
n = n & (n - 1)
count += 1
return count
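# Illustrative sketch (added): Solution().countBits(5) -> [0, 1, 1, 2, 1, 2].
# _hammingWeight uses Kernighan's trick: n & (n - 1) clears the lowest set
# bit, so the loop runs once per 1-bit rather than once per bit position.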
| [
"[email protected]"
] | |
be0f715176bf5675a0da875c7653b2b3412b16bb | 05931ab3da3c0f6b0f400e38c503f43d54ceb869 | /CompilationTest/EnumTest/results/EnumTest.py | d0c906c8754eaf196355fca3d9268b172d9ec7f4 | [
"MIT"
] | permissive | onelang/TestArtifacts | c0881f83c703f89e6d96b1bb757195b60816cea7 | 3f067308c8da3a6f95a001ff8b2d0a0421ae3285 | refs/heads/master | 2023-03-28T14:37:25.904076 | 2021-03-21T14:15:00 | 2021-03-21T14:15:00 | 116,868,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | from enum import Enum
class TestEnum(Enum):
ITEM1 = 0
ITEM2 = 1
class TestClass:
def test_method(self):
enum_v = TestEnum.ITEM1
if 3 * 2 == 6:
enum_v = TestEnum.ITEM2
check1 = "SUCCESS" if enum_v == TestEnum.ITEM2 else "FAIL"
check2 = "FAIL" if enum_v == TestEnum.ITEM1 else "SUCCESS"
print "Item1: %s, Item2: %s, checks: %s %s" % (TestEnum.ITEM1, enum_v, check1, check2)
try:
TestClass().test_method()
except Exception as err:
print "Exception: " + err.message | [
"[email protected]"
] | |
a145c3315ea277e5e90983ce02f14e0bc9163f53 | b825709ddfe1394daa0579e53d69994cb680735b | /zad2_2.py | 5d8ada69e05ff442c077304398ec5de5c3eeb5cc | [] | no_license | ZajMar/Python2017MZ | 150873aa23688671b3cfab5df3e6eb684991db18 | d8cd0197f34914397ba6c10180629b79b43b1732 | refs/heads/master | 2021-08-29T10:49:55.290201 | 2017-12-13T19:27:34 | 2017-12-13T19:27:34 | 114,157,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import math
class Complex():
real=0
imag=0
def __init__(self, real, imag=0.0):
self.real = real
self.imag = imag
def __str__(self):
return '(%s, %s)' % (self.real, self.imag)
def add(x,y):
return Complex(x.real+y.real,x.imag+y.imag)
def sub(x,y):
return Complex(x.real-y.real,x.imag-y.imag)
def mod(self):
return math.sqrt(self.real*self.real+self.imag*self.imag)
    def __sub__(self, other):
        return Complex(self.real - other.real, self.imag - other.imag)
    def __add__(self, other):
        return Complex(self.real + other.real, self.imag + other.imag)
def neg(self):
return Complex(-self.real,-self.imag)
a = Complex (1.0,1.0)
b=Complex(3.0,4.0)
print("a = "+str(a))
print("b = "+str(b))
print("Sum: ")
print(b.add(a))
print("Mod: ")
print(a.mod())
print("Neg: ")
print(b.neg())
| [
"[email protected]"
] | |
1066d6e1a4cd83fc17f1bb1db30c277b655bcba1 | 5a0409c341b1104738ef5a1c9c6f58fc0c0b4024 | /tscraper.py | fd9a935db374a01ef70ef0d684511f1b4c73aaff | [] | no_license | virgoaugustine/TonaScrapper | ae441b17f53fa077b117a965e9aa95dd883416c6 | c98f20ad8f125232484d5dade9cbf82c48de5721 | refs/heads/master | 2022-12-21T00:27:55.877317 | 2020-09-26T13:44:15 | 2020-09-26T13:44:15 | 298,618,345 | 0 | 0 | null | 2020-09-25T15:59:08 | 2020-09-25T15:59:07 | null | UTF-8 | Python | false | false | 2,536 | py | import requests
from bs4 import BeautifulSoup
import csv
import time
def get_page(url):
response = requests.get(url)
if not response.ok:
print("Server responded:", response.status_code)
else:
soup = BeautifulSoup(response.content, "html.parser")
return soup
def get_detail_data(soup):
try:
title = soup.find("h1", class_="title--3s1R8").text
except:
title = ""
try:
date = soup.find("h3", class_="sub-title--37mkY").text
except:
date = ""
try:
price = soup.find("div", class_="amount--3NTpl").text
price = price.split(maxsplit=1)[-1]
except:
price = ""
data = {
"title": title,
"date": date,
"price": price,
}
return data
def get_index_data(soup):
try:
links = soup.findAll("a", class_="card-link--3ssYv gtm-ad-item")
except:
links = []
urls = ["https://tonaton.com"+item.get("href") for item in links]
return urls
def write_csv(data, url):
with open("tonatonoutput.csv", "a") as csvfile:
writer = csv.writer(csvfile)
row = [data["title"], data["date"], data["price"], url]
writer.writerow(row)
def main():
search_item = "cars" #change this to the item you want to search for.
page_num = 1 #start at page 1 of the search query
flag = True #Set a flag to control how the while loop runs.
while flag:
url = f"https://tonaton.com/en/ads/ghana/{search_item}?page={page_num}"
page = get_page(url)
page_end = page.find('div', class_="no-result-text--16bWr")
        #A results page always contains ad links; once we go past the last
        #page, the site renders a "No results found." element instead.
        #find() returns None (falsy) while results exist and a tag (truthy) on
        #the empty page, so page_end doubles as the stop signal.
if page_end:
flag = False #set flag to false once we get 'No results found so the while loop terminates'
return 'Script finished successfully.' #This return statement prevents the code below from executing.
products = get_index_data(page)
for link in products:
data = get_detail_data(get_page(link))
write_csv(data, link)
page_num += 1 #Increase page_num by 1 to go to the next page in the next iteration of the while loop.
return "Script completed."
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
10d914f403ac5bfd4aacc7330c3db318947f429e | e20ed90b9be7a0bcdc1603929d65b2375a224bf6 | /generated-libraries/python/netapp/net/net_ifgrp_info.py | 51fb53a5a5d184165370e0966a17a0a5662d4247 | [
"MIT"
] | permissive | radekg/netapp-ontap-lib-gen | 530ec3248cff5ead37dc2aa47ced300b7585361b | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | refs/heads/master | 2016-09-06T17:41:23.263133 | 2015-01-14T17:40:46 | 2015-01-14T17:40:46 | 29,256,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,702 | py | from netapp.netapp_object import NetAppObject
class NetIfgrpInfo(NetAppObject):
"""
Network interface group information
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_node = None
@property
def node(self):
"""
Specifies the name of node.
Attributes: key, required-for-create, non-modifiable
"""
return self._node
@node.setter
def node(self, val):
if val != None:
self.validate('node', val)
self._node = val
_up_ports = None
@property
def up_ports(self):
"""
Specifies all active ports of an ifgrp.
Attributes: non-creatable, non-modifiable
"""
return self._up_ports
@up_ports.setter
def up_ports(self, val):
if val != None:
self.validate('up_ports', val)
self._up_ports = val
_down_ports = None
@property
def down_ports(self):
"""
Specifies all inactive ports of an ifgrp.
Attributes: non-creatable, non-modifiable
"""
return self._down_ports
@down_ports.setter
def down_ports(self, val):
if val != None:
self.validate('down_ports', val)
self._down_ports = val
_mac_address = None
@property
def mac_address(self):
"""
Specifies the MAC address of the ifgrp.
For example: '02:0c:29:78:e1:b7'
Attributes: non-creatable, non-modifiable
"""
return self._mac_address
@mac_address.setter
def mac_address(self, val):
if val != None:
self.validate('mac_address', val)
self._mac_address = val
_ifgrp_name = None
@property
def ifgrp_name(self):
"""
Specifies the interface group name.
Attributes: key, required-for-create, non-modifiable
"""
return self._ifgrp_name
@ifgrp_name.setter
def ifgrp_name(self, val):
if val != None:
self.validate('ifgrp_name', val)
self._ifgrp_name = val
_mode = None
@property
def mode(self):
"""
Specifies the link policy for the ifgrp.
Possible values:
<ul>
<li> 'multimode - All links are simultaneously
active',
<li> 'multimode_lacp - Link state is managed by the
switch using link aggregation control protocol (LACP)
(IEEE 802.3ad)',
<li> 'singlemode - Only one link is active at a
time'
</ul>
Attributes: required-for-create, non-modifiable
"""
return self._mode
@mode.setter
def mode(self, val):
if val != None:
self.validate('mode', val)
self._mode = val
_port_participation = None
@property
def port_participation(self):
"""
Port participation state of the ifgrp.
Attributes: non-creatable, non-modifiable
Possible values:
<ul>
<li> "full" - Indicates all the ifgrp ports are
active,
<li> "partial" - Indicates not all the ifgrp ports
are active,
<li> "none" - Indicates none of the ifgrp ports is
active
</ul>
"""
return self._port_participation
@port_participation.setter
def port_participation(self, val):
if val != None:
self.validate('port_participation', val)
self._port_participation = val
_ports = None
@property
def ports(self):
"""
List of ports associated with this ifgrp.
Attributes: non-creatable, non-modifiable
"""
return self._ports
@ports.setter
def ports(self, val):
if val != None:
self.validate('ports', val)
self._ports = val
_distribution_function = None
@property
def distribution_function(self):
"""
Specifies the traffic distribution function for the
ifgrp.
Attributes: required-for-create, non-modifiable
Possible values:
<ul>
<li> "mac" - Network traffic is distributed
on the basis of MAC addresses,
<li> "ip" - Network traffic is distributed
on the basis of IP addresses,
<li> "sequential" - Network traffic is distributed
round-robin to each interface,
<li> "port" - Network traffic is distributed
by transport layer address 4-tuple
</ul>
"""
return self._distribution_function
@distribution_function.setter
def distribution_function(self, val):
if val != None:
self.validate('distribution_function', val)
self._distribution_function = val
@staticmethod
def get_api_name():
return "net-ifgrp-info"
@staticmethod
def get_desired_attrs():
return [
'node',
'up-ports',
'down-ports',
'mac-address',
'ifgrp-name',
'mode',
'port-participation',
'ports',
'distribution-function',
]
def describe_properties(self):
return {
'node': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'up_ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'down_ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'mac_address': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'ifgrp_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'mode': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'port_participation': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'distribution_function': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
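# Illustrative usage sketch (added; 'node-01' and 'a0a' are hypothetical):
#
#   query = NetIfgrpInfo()
#   query.node = 'node-01'
#   query.ifgrp_name = 'a0a'
#   # attributes left unset keep the query unconstrained, per the class docstring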
| [
"[email protected]"
] | |
606f2fff293fb8b975dc6cfbeac4bbc84d72b53e | 2ac9115b48feadce419b3a6a7c367809032d1803 | /modules/ED_F6.[Map GDP Changes Grid].py | ab353ab49c33fc0b731f976de0b7c3c9cdc4c9ec | [
"MIT"
] | permissive | YixuanZheng/Aerosol_Inequality_2019 | f4643910286b52e476038540aaf5509d712bf580 | 029b198311f192dbb98b96053ce0fbc55a3ab392 | refs/heads/master | 2020-08-14T07:41:17.080177 | 2020-02-06T23:21:56 | 2020-02-06T23:21:56 | 215,124,879 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,825 | py | # -*- coding: utf-8 -*-
'''
This code generates Fig. S6
Spatial distribution of economic impacts introduced by anthropogenic aerosol-induced cooling.
by Yixuan Zheng ([email protected])
'''
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap,maskoceans
import _env
from matplotlib.colors import ListedColormap
import seaborn.apionly as sns
import matplotlib
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Helvetica'
def set_latlon_ticks(ax,m):
ax.set_xticks(np.arange(-160,161,40))
ax.set_xticklabels('')
ax.set_yticks(np.arange(-90,91,45))
ax.set_yticklabels('')
parallels = np.arange(-90.,91,45.)
m.drawparallels(parallels,labels=[True,False,False,False],dashes=[3,3],xoffset=5,linewidth = 0)
meridians = np.arange(-160,161,40.)
m.drawmeridians(meridians,labels=[True,False,False,True],dashes=[3,3],yoffset=5,linewidth = 0)
p_scen = 'No-Aerosol' #aerosol removal scenario
ds = 'ERA-Interim'
odir_plot = _env.odir_root + '/plot/'
_env.mkdirs(odir_plot)
of_plot = odir_plot + 'ED_F6.Map_GDP_Changes_Grid.png'
if_mask = _env.idir_root + '/regioncode/CESM_19x26_Land-Ocean_Mask.nc'
iarr_land = Dataset(if_mask)['landmask'][:]
fig = plt.figure(figsize=(21,5))
if_rgdp = _env.odir_root + '/gdp_' + ds + '/GDP_Changes_Burke_country-lag0_2010_' + ds + '_' + p_scen + '_gridded.nc'
arr_gdp = Dataset(if_rgdp)['GDP_Ratio_Median'][:]*100 # to percent
if_temp = _env.odir_root + '/sim_temperature/Simulated_Global_Gridded_TREFHT.nc'
arr_t_pval = Dataset(if_temp)['TREFHT_P_No-Aerosol_With-Aerosol'][:]
ax = fig.add_subplot(111)
m = Basemap(ellps = 'WGS84',
llcrnrlon=-180,llcrnrlat=-90, urcrnrlon=177.5,urcrnrlat=90.,
suppress_ticks=True)#resolution='i',
m.drawmapboundary()
lat = Dataset(if_temp)['lat'][:]
lon = Dataset(if_temp)['lon'][:]
#rearrange matrix for plot
lon_tmp = lon.copy()
lon[72:144] = lon[0:72]
lon[0:72] = lon_tmp[72:144]
lon[lon>=180] = lon[lon>=180] - 360
arr_gdp_tmp = arr_gdp.copy()
arr_gdp[:,72:144] = arr_gdp[:,0:72]
arr_gdp[:,0:72] = arr_gdp_tmp[:,72:144]
#mask high southern latitudes (Antarctica/Southern Ocean)
arr_gdp[0:21,:] = np.nan
arr_t_pval_tmp = arr_t_pval.copy()
arr_t_pval[:,72:144] = arr_t_pval[:,0:72]
arr_t_pval[:,0:72] = arr_t_pval_tmp[:,72:144]
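#note (added): the column swaps above roll the 0..360-degree model grid by 180
#degrees so longitudes run from -180 to 177.5, matching the Basemap domain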
x,y = np.meshgrid(lon,lat)
lat_os = 180/95.
lon_os = 360/144.
lon_ = lon+2.5/2
lat_ = lat+lat_os/2
lon_arr = np.repeat(lon_[np.newaxis,:],96,axis=0)
lat_arr = np.repeat(lat_[:,np.newaxis],144,axis=1)
arr_gdp_ocean_masked = maskoceans(np.repeat(lon_[np.newaxis,:],96,axis=0),np.repeat(lat_[:,np.newaxis],144,axis=1),arr_gdp)
my_cmap = ListedColormap(sns.color_palette('RdBu_r',20).as_hex()[2:18])
cs = m.pcolormesh(lon,lat,-np.squeeze(arr_gdp_ocean_masked),cmap=my_cmap, vmin=-1.6, vmax=1.6) #,legend_ticks)
# add colorbar.
cbar = m.colorbar(cs,location='right',pad="5%",ticks=[-1.4,-1.2,-1.0,-0.8,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,0.8,1.0,1.2,1.4][::2])
cbar.set_label('(%)',fontsize = 14,rotation=270,labelpad=18)
cbar.ax.set_yticklabels([-1.4,-1.2,-1.0,-0.8,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,0.8,1.0,1.2,1.4][::2],size=14)
m.readshapefile(_env.idir_root + '/shape/kx-world-coastline-110-million-SHP/world-coastline-110-million',
'coastline',drawbounds=True,linewidth=0.8,color='k',
zorder=2)
arr_t_pval_ocean_masked = maskoceans(np.repeat(lon_[np.newaxis,:],96,axis=0),np.repeat(lat_[:,np.newaxis],144,axis=1),arr_t_pval)
arr_t_pval_ocean_masked = arr_t_pval_ocean_masked.filled(-1)
lon_arr_m = lon_arr[np.where(arr_t_pval_ocean_masked>0.05)]
lat_arr_m = lat_arr[np.where(arr_t_pval_ocean_masked>0.05)]
x,y = m(lon_arr_m, lat_arr_m)
m.scatter(x, y, marker='+', color='black', zorder=5,s=0.1,alpha=0.4)
set_latlon_ticks(ax,m)
plt.savefig(of_plot, dpi=300,bbox_inches='tight')
| [
"[email protected]"
] | |
dd25744be735043c3fb248a419e1a7d5f9c2a44d | ea29963a93cf9cdafd139e1f7d8ea3f12ebd67bf | /service1/app/models/user.py | 389c270c8150a9780a889bb9453e80ed7f823f07 | [] | no_license | gojeboy/demo-architecture | 42ff97038a7e7bf552525da76d77667e022bd66c | 07b2ab66874a9e545b204ad434f901b7cbd6cfe9 | refs/heads/master | 2023-01-22T15:15:00.752219 | 2020-04-04T16:16:40 | 2020-04-04T16:16:40 | 251,533,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | from app import db, app
class User(db.Model):
__tablename__ = "tbl_user"
__table_args__ = {"extend_existing": True}
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(200))
surname = db.Column(db.String(200))
# addresses = db.relationship("address")
def __init__(self, name, surname):
self.name = name
self.surname = surname
def save(self):
print(app.config)
db.session.add(self)
db.session.commit()
@staticmethod
def get_all_users():
return db.session.query(User).all()
def serialize(self):
return {"id": self.id, "name": self.name, "surname": self.surname}
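# Illustrative usage sketch (added; assumes an active Flask app context and a
# created database; the names are hypothetical):
#
#   user = User("Ada", "Lovelace")
#   user.save()
#   print([u.serialize() for u in User.get_all_users()])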
| [
"[email protected]"
] | |
b86128aee5418c0b7ac108bd068d443064cc3ec0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_sermon.py | 40a9742cbaf0299a9d7ec6767d646bfc24b37d57 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py |
#class header
class _SERMON():
def __init__(self,):
self.name = "SERMON"
self.definitions = [u'a part of a Christian church ceremony in which a priest gives a talk on a religious or moral subject, often based on something written in the Bible: ', u'a long talk in which someone advises other people how they should behave in order to be better people: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] |