blob_id (string, 40-40) | directory_id (string, 40-40) | path (string, 3-616) | content_id (string, 40-40) | detected_licenses (sequence, 0-112) | license_type (2 classes) | repo_name (string, 5-115) | snapshot_id (string, 40-40) | revision_id (string, 40-40) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k-681M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3-10.2M) | extension (188 classes) | content (string, 3-10.2M) | authors (sequence, 1-1) | author_id (string, 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c4ffd485124e4384a907a18abd1956cae91369f6 | a5e88c5e8ee613b8643a3cc15a866c9328e5949d | /repomd/yumrepo.py | 1407c46ec6a8b2d44f94f108f570eeb474e15899 | [] | no_license | jctanner/rbuild-yumcheckout-plugin | f26a0da5e206da90b77ca505e73814a2840a978e | 782e5f63ec9082d4972229c770bb7cf6b7f947d1 | refs/heads/master | 2021-01-23T13:54:46.022213 | 2012-10-19T15:00:55 | 2012-10-19T15:00:55 | 4,719,725 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,779 | py | from linkparser import *
from yumpackage import *
from yumgroup import *
import urllib2
from urllib2 import urlopen
import re
from xml.dom.minidom import parse, parseString
import StringIO
import gzip
class yumRepo(object):
def __init__(self, url):
#self.id = id
self.url = url
self.repodataurl = ""
self.metafiles = []
self.primarymetafiles = []
self.groupmetafiles = []
self.packages = []
self.groups = []
#generated
self.uniquepackages = []
self.latestpackages = []
self.upstreamtroves = []
self.localtroves = []
self.missingtroves = []
#self.getUrl()
self.findMetaFiles()
self.parseRepomd()
self.parsePrimaryXML()
self.parseCompsXML()
def getUrl(self):
print self.url
def addPackage(self, package):
print "INFO: adding %s" % package.name
self.packages.append(package)
def listPackages(self):
for pkg in self.packages:
print "%s-%s-%s-%s.rpm" % (pkg.name, pkg.version, pkg.release, pkg.arch)
    def uniquePackages(self):
pass
def findLatestPackageVer(self, pkgname, arch):
latestpkg = yumPackage(pkgname, '0', '0', '0', '0', '', '')
for pkg in self.packages:
if (pkg.name == pkgname) and (pkg.arch == arch):
if (pkg.version > latestpkg.version):
if (pkg.release >= latestpkg.release):
if (pkg.epoch >= latestpkg.epoch):
latestpkg.version = pkg.version
latestpkg.release = pkg.release
latestpkg.epoch = pkg.epoch
latestpkg.arch = pkg.arch
latestpkg.repourl = pkg.repourl
latestpkg.url = pkg.url
print "INFO: %s is latest for %s " % (latestpkg.url, self.url)
#epdb.st()
else:
#epdb.st()
print "INFO: %s !> %s" % (pkg.url, latestpkg.url)
return latestpkg
def addGroup(self, group):
self.groups.append(group)
def parseRepomd(self):
for filename in self.metafiles:
if re.search('repomd', filename):
print "INFO: repomd file = %s" % filename
# get repomd
#import epdb; epdb.st()
compdom = parse(urlopen(self.repodataurl + "/" + filename))
for node in compdom.getElementsByTagName('data'):
nodetype = node.attributes['type'].value
#from conary.lib import epdb; epdb.st()
#method 1
#nodelocation = node.childNodes[1].attributes['href'].value
#method 2
nodelocation = node.getElementsByTagName('location')[0].attributes['href'].nodeValue
print "INFO: xmlurl -- %s %s" % (nodetype, nodelocation)
                if nodetype.encode('utf8') == "primary":
print "INFO: add %s as primary" % nodelocation
self.primarymetafiles.append(nodelocation.encode('utf8'))
                if nodetype.encode('utf8') == "group":
print "INFO: add %s as comps" % nodelocation
self.groupmetafiles.append(nodelocation.encode('utf8'))
# get primary filename
# get comps filename
#import epdb; epdb.st()
def findMetaFiles(self):
#epdb.st()
repourl = self.url + "/repodata"
#epdb.st()
print "INFO: attemping to parse %s" % repourl
req = urllib2.Request(repourl)
response = urllib2.urlopen(req)
        # note: urllib2 raises HTTPError for most failures before reaching here
        if response.code != 200:
            print "ERROR: repository does not have a repodata directory"
            return
self.repodataurl = repourl
data = response.read()
parser = linkParser()
parser.parse(data)
for filename in parser.get_hyperlinks():
#print filename
if re.search('xml', filename):
print "INFO: add %s to metafiles" % filename
self.metafiles.append(filename)
# HTML != XML ... can't use parse
#compdom = parse(urlopen(repourl))
#compdom = parse(response.read())
#import epdb; epdb.st()
def parsePrimaryXML(self):
print "INFO: parsing primary.xml"
for filename in self.primarymetafiles:
#import epdb; epdb.st()
if filename.endswith('.gz'):
print "INFO: %s is compressed, retrieving %s " % (filename, (self.url + "/" + filename))
resp = urlopen(self.url + "/" + filename)
output = StringIO.StringIO()
output.write(resp.read())
output.seek(0)
decompressed = gzip.GzipFile(fileobj=output)
#xml = decompressed.read()
#compdom = parse(decompressed.read())
#compdom = parse(xml)
compdom = parse(decompressed)
for node in compdom.getElementsByTagName('package'):
#epdb.st()
pkgname = node.getElementsByTagName('name')[0].childNodes[0].nodeValue.encode('utf8')
pkgarch = node.getElementsByTagName('arch')[0].childNodes[0].nodeValue.encode('utf8')
pkgepoch = node.getElementsByTagName('version')[0].attributes['epoch'].value.encode('utf8')
pkgvers = node.getElementsByTagName('version')[0].attributes['ver'].value.encode('utf8')
pkgrel = node.getElementsByTagName('version')[0].attributes['rel'].value.encode('utf8')
pkgloc = node.getElementsByTagName('location')[0].attributes['href'].value.encode('utf8')
pkgsumtype = node.getElementsByTagName('checksum')[0].attributes['type'].value.encode('utf8')
pkgsum = node.getElementsByTagName('checksum')[0].childNodes[0].nodeValue.encode('utf8')
try:
pkgpackager = node.getElementsByTagName('packager')[0].childNodes[0].nodeValue.encode('utf8')
except:
pkgpackager = 'none'
#from conary.lib import epdb; epdb.st()
# name, epoch, version, release, arch, location
package = yumPackage(pkgname, pkgepoch, pkgvers, pkgrel, pkgarch, pkgloc, self.url)
package.sumtype = pkgsumtype
package.sum = pkgsum
package.packager = pkgpackager
self.addPackage(package)
else:
print "INFO: %s is not compressed" % filename
def parseCompsXML(self):
"""
missingpackages = ['bsh-groupfile',
'ctdb',
'ctdb-devel',
'ecs-groupfile',
'kernel-debug',
'kernel-debug-devel',
'kernel-xen',
'kernel-xen-devel',
'kmod-be2iscsi-xen-rhel5u5',
'kmod-be2net-xen-rhel5u5',
'kmod-cmirror',
'kmod-cmirror-xen',
'kmod-gfs kmod-gfs-xen',
'kmod-gnbd kmod-gnbd-xen',
'kmod-igb-xen-rhel5u5',
'kmod-lpfc-xen-rhel5u5',
'serviceguard',
'sgcmom',
'vmware-open-vm-tools-common',
'vmware-open-vm-tools-nox',
'kmod-gfs',
'kmod-gfs-xen',
'kmod-gnbd',
'kmod-gnbd-xen'
]
"""
missingpackages = []
"""
conflictpackages = ['samba3x', 'samba3x-client', 'samba3x-common',
'samba3x-swat', 'samba3x-winbind'
'postgresql184', 'postgresql84-contrib', 'postgresql84-devel'
'postgresql84-docs', 'postgresql84-plperl', 'postgresql84-plpython'
'postgresql84-pltcl', 'postgresql84-python', 'postgresql84-server',
'postgresql84-tcl','postgresql84-test',
'php53', 'php53-bcmath', 'php53-cli', 'php53-dba', 'php53-devel',
'php53-gd', 'php53-imap', 'php53-ldap', 'php53-mbstring',
'php53-mysql', 'php53-odbc', 'php53-pdo', 'php53-pgsql'
'php53-snmp', 'php53-soap', 'php53-xml', 'php53-xmlrpc',
'freeradius2', 'freeradius2-ldap', 'freeradius2-utils',
'bind97', 'bind97-devel', 'bind97-utils'
]
"""
conflictpackages = []
"""
badpackages = ['cisco-vm-grub-config']
"""
badpackages = []
excludepackages = list(missingpackages)
excludepackages += list(conflictpackages)
excludepackages += badpackages
#debug
excludepackages = conflictpackages
#excludegroups = ['cisco-patchbundle-nonreboot']
excludegroups = []
for filename in self.groupmetafiles:
print "DEBUG: handling %s" % filename
try:
print "DEBUG: getting comps from %s" % (self.url + "/" + filename)
except:
            import epdb; epdb.st()  # drop into the debugger on formatting errors
print "DEBUG: getting comps from %s" % (self.url + "/" + filename)
compdom = parse(urlopen(self.url + "/" + filename))
for node in compdom.getElementsByTagName('group'):
#find the id for this group in the dom
group_id = node.getElementsByTagName('id')[0].childNodes[0].nodeValue
conarygroupname = "" + group_id.encode('utf-8')
conarygroupname = conarygroupname.lower()
conarygroupname = re.sub('\s+','-',conarygroupname)
conarygroupname = re.sub('/','-',conarygroupname)
conarygroupname = re.sub('\(','',conarygroupname)
conarygroupname = re.sub('\)','',conarygroupname)
grp = yumGroup(conarygroupname)
print "DEBUG: processing group - %s" % grp.id
packages = node.getElementsByTagName('packagereq')
for package in packages:
#use the value of the first index for each package name
pname = package.childNodes[0].nodeValue
print "DEBUG: \tpackage: %s" % pname
#add packagename to the yumgroup object
if pname.encode('utf-8') not in excludepackages:
grp.addpackage(pname.encode('utf-8'))
#add this group to the list of all groups
if conarygroupname not in excludegroups:
#grpMap.append(grp)
self.groups.append(grp)
print "DEBUG: comps processed from % s" % self.url
def findLatestPackages(self):
self.latestpackages = []
latesthash = {}
#for pkg in self.uniquepackages:
#for pkg in self.uniquepackages:
for pkg in self.packages:
key = pkg.name + "-" + pkg.arch
if not latesthash.has_key(key):
latesthash[key] = pkg
else:
#if key is "kernel-x86_64":
# epdb.st()
#if self.pkgIsOlder(latesthash[key], pkg):
if self.pkgIsNewer(pkg, latesthash[key]):
latesthash[key] = pkg
for key in latesthash.keys():
self.latestpackages.append(latesthash[key])
#epdb.st()
def pkgIsNewer(self, p, q):
# p is newer than q ?
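        # NOTE: the comparisons below are plain string comparisons, so
        # multi-digit components can misorder (e.g. '10' < '9' lexically).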
if p.name == q.name:
if p.version > q.version:
return True
elif p.version == q.version:
if p.release > q.release:
return True
elif p.release == q.release:
if p.epoch > q.epoch:
return True
elif p.epoch == q.epoch:
return False
else:
return False
else:
return False
else:
return False
| [
"[email protected]"
] | |
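# A minimal usage sketch for the yumRepo class above; the mirror URL is a
# hypothetical placeholder, and constructing the object immediately fetches
# and parses repodata/, primary.xml and comps.xml over the network.
repo = yumRepo('http://mirror.example.com/centos/5/os/x86_64')
repo.listPackages()
repo.findLatestPackages()
latest = repo.findLatestPackageVer('kernel', 'x86_64')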
3ec75456e5cf113904ab7c17e0059d937c023644 | 373939995a89ed84a26653bf4b11e02b9e060b3d | /20210503PythonAdvanced/05-contextmanager/ctx01.py | 48cab9fb6ae9619ae2f1d2d1236c1f7fab38fe4e | [
"MIT"
] | permissive | AuroraBoreas/pypj_sonic_pc | 28406f1951280b9349a25fdbd0ad02bae8adc316 | 3016ed173d912e2ffa08c8581c98a5932c486467 | refs/heads/master | 2023-09-01T15:04:36.246303 | 2023-08-25T01:05:28 | 2023-08-25T01:05:28 | 279,821,926 | 0 | 0 | MIT | 2022-06-22T04:52:25 | 2020-07-15T09:15:32 | Python | UTF-8 | Python | false | false | 702 | py | "#Python is a protocol orientated lang; every top-level function has a corresponding dunder method implemented;"
import sqlite3
with sqlite3.connect('test.db') as conn:
cur = conn.cursor()
cur.execute('CREATE TABLE points(x int, y int);')
cur.execute('INSERT INTO points(x, y) VALUES(1, 1);')
cur.execute('INSERT INTO points(x, y) VALUES(1, 2);')
cur.execute('INSERT INTO points(x, y) VALUES(2, 1);')
cur.execute('INSERT INTO points(x, y) VALUES(2, 2);')
for row in cur.execute('SELECT x, y FROM points;'):
print(row)
for row in cur.execute('SELECT sum(x * y) FROM points;'):
print(row)
cur.execute('DROP TABLE points;') | [
"[email protected]"
] | |
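# The docstring above points at Python's protocol orientation: the `with`
# statement simply drives __enter__/__exit__. A minimal hand-rolled context
# manager mirroring the create/drop pattern in ctx01.py (sketch):
class temptable:
    def __init__(self, cur):
        self.cur = cur
    def __enter__(self):
        self.cur.execute('CREATE TABLE points(x int, y int);')
        return self.cur
    def __exit__(self, exc_type, exc_value, traceback):
        self.cur.execute('DROP TABLE points;')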
6b9810de0500f330dc7287b1f9411c40fcb595b6 | 9d939a4909a75a268e8d4dfd18a0da7fbbae4b0a | /astropy/coordinates/tests/test_velocity_corrs.py | e43e1f05ca508c092480425daf991e1ded97656c | [
"BSD-3-Clause"
] | permissive | aboucaud/astropy | 023db2dea40bc03bb76b4a7a85f93f6a5064dd0d | cb3227199053440555ad7a92842f5e0fa9a2d3db | refs/heads/master | 2020-12-14T09:52:55.026630 | 2017-06-26T15:30:19 | 2017-06-26T15:30:19 | 95,464,994 | 0 | 0 | null | 2017-06-26T16:07:50 | 2017-06-26T16:07:50 | null | UTF-8 | Python | false | false | 16,192 | py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pytest
import numpy as np
from ...tests.helper import assert_quantity_allclose
from ... import units as u
from ...time import Time
from .. import EarthLocation, SkyCoord, Angle
from ..sites import get_builtin_sites
@pytest.mark.parametrize('kind', ['heliocentric', 'barycentric'])
def test_basic(kind):
t0 = Time('2015-1-1')
loc = get_builtin_sites()['example_site']
sc = SkyCoord(0, 0, unit=u.deg, obstime=t0, location=loc)
rvc0 = sc.radial_velocity_correction(kind)
assert rvc0.shape == ()
assert rvc0.unit.is_equivalent(u.km/u.s)
scs = SkyCoord(0, 0, unit=u.deg, obstime=t0 + np.arange(10)*u.day,
location=loc)
rvcs = scs.radial_velocity_correction(kind)
assert rvcs.shape == (10,)
assert rvcs.unit.is_equivalent(u.km/u.s)
test_input_time = Time(2457244.5, format='jd')
#test_input_loc = EarthLocation.of_site('Cerro Paranal')
# to avoid the network hit we just copy here what that yields
test_input_loc = EarthLocation.from_geodetic(lon=-70.403*u.deg,
lat=-24.6252*u.deg,
height=2635*u.m)
def test_helio_iraf():
"""
Compare the heliocentric correction to the IRAF rvcorrect.
`generate_IRAF_input` function is provided to show how the comparison data
was produced
"""
# this is based on running IRAF with the output of `generate_IRAF_input` below
rvcorr_result="""
# RVCORRECT: Observatory parameters for European Southern Observatory: Paranal
# latitude = -24:37.5
# longitude = 70:24.2
# altitude = 2635
## HJD VOBS VHELIO VLSR VDIURNAL VLUNAR VANNUAL VSOLAR
2457244.50120 0.00 -10.36 -20.35 -0.034 -0.001 -10.325 -9.993
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.50278 0.00 -2.29 -11.75 0.115 0.004 -2.413 -9.459
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.50317 0.00 -17.19 -17.44 0.078 0.001 -17.269 -0.253
2457244.50348 0.00 2.35 -6.21 0.192 0.006 2.156 -8.560
2457244.49959 0.00 2.13 -15.06 -0.078 -0.000 2.211 -17.194
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.50186 0.00 -24.47 -22.16 -0.038 -0.004 -24.433 2.313
2457244.50470 0.00 -11.11 -8.57 0.221 0.005 -11.332 2.534
2457244.50402 0.00 6.90 -0.38 0.259 0.008 6.629 -7.277
2457244.50051 0.00 11.53 -5.78 0.038 0.004 11.489 -17.311
2457244.49768 0.00 -1.84 -19.37 -0.221 -0.004 -1.612 -17.533
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.50109 0.00 -27.69 -22.90 -0.096 -0.006 -27.584 4.785
2457244.50457 0.00 -17.00 -9.30 0.196 0.003 -17.201 7.704
2457244.50532 0.00 2.62 2.97 0.340 0.009 2.276 0.349
2457244.50277 0.00 16.42 4.67 0.228 0.009 16.178 -11.741
2457244.49884 0.00 13.98 -5.48 -0.056 0.002 14.039 -19.463
2457244.49649 0.00 -2.84 -19.84 -0.297 -0.007 -2.533 -17.000
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.50025 0.00 -29.30 -22.47 -0.149 -0.008 -29.146 6.831
2457244.50398 0.00 -21.55 -9.88 0.146 0.001 -21.700 11.670
2457244.50577 0.00 -3.26 4.00 0.356 0.009 -3.623 7.263
2457244.50456 0.00 14.87 11.06 0.357 0.011 14.497 -3.808
2457244.50106 0.00 22.20 7.14 0.149 0.008 22.045 -15.058
2457244.49732 0.00 14.45 -5.44 -0.146 -0.001 14.600 -19.897
2457244.49554 0.00 -3.84 -19.33 -0.356 -0.008 -3.478 -15.491
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49942 0.00 -29.36 -20.83 -0.193 -0.009 -29.157 8.527
2457244.50312 0.00 -24.26 -9.75 0.088 -0.001 -24.348 14.511
2457244.50552 0.00 -8.66 4.06 0.327 0.007 -8.996 12.721
2457244.50549 0.00 10.14 14.13 0.413 0.012 9.715 3.994
2457244.50305 0.00 23.35 15.76 0.306 0.011 23.031 -7.586
2457244.49933 0.00 24.78 8.18 0.056 0.006 24.721 -16.601
2457244.49609 0.00 13.77 -5.06 -0.221 -0.003 13.994 -18.832
2457244.49483 0.00 -4.53 -17.77 -0.394 -0.010 -4.131 -13.237
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49907 0.00 -28.17 -17.30 -0.197 -0.009 -27.966 10.874
2457244.50285 0.00 -22.96 -5.96 0.090 -0.001 -23.048 16.995
2457244.50531 0.00 -7.00 8.16 0.335 0.007 -7.345 15.164
2457244.50528 0.00 12.23 18.47 0.423 0.012 11.795 6.238
2457244.50278 0.00 25.74 20.13 0.313 0.012 25.416 -5.607
2457244.49898 0.00 27.21 12.38 0.057 0.006 27.144 -14.829
2457244.49566 0.00 15.94 -1.17 -0.226 -0.003 16.172 -17.111
2457244.49437 0.00 -2.78 -14.17 -0.403 -0.010 -2.368 -11.387
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49875 0.00 -25.73 -12.99 -0.193 -0.009 -25.525 12.734
2457244.50246 0.00 -20.63 -1.91 0.088 -0.001 -20.716 18.719
2457244.50485 0.00 -5.03 11.90 0.327 0.007 -5.365 16.928
2457244.50482 0.00 13.77 21.97 0.413 0.012 13.347 8.202
2457244.50238 0.00 26.98 23.60 0.306 0.011 26.663 -3.378
2457244.49867 0.00 28.41 16.02 0.056 0.005 28.353 -12.393
2457244.49542 0.00 17.40 2.78 -0.221 -0.003 17.625 -14.625
2457244.49416 0.00 -0.90 -9.93 -0.394 -0.010 -0.499 -9.029
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49894 0.00 -22.20 -7.14 -0.149 -0.008 -22.045 15.058
2457244.50268 0.00 -14.45 5.44 0.146 0.001 -14.600 19.897
2457244.50446 0.00 3.84 19.33 0.356 0.008 3.478 15.491
2457244.50325 0.00 21.97 26.39 0.357 0.011 21.598 4.419
2457244.49975 0.00 29.30 22.47 0.149 0.008 29.146 -6.831
2457244.49602 0.00 21.55 9.88 -0.146 -0.001 21.700 -11.670
2457244.49423 0.00 3.26 -4.00 -0.356 -0.009 3.623 -7.263
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49921 0.00 -17.43 -0.77 -0.096 -0.006 -17.333 16.664
2457244.50269 0.00 -6.75 12.83 0.196 0.003 -6.949 19.583
2457244.50344 0.00 12.88 25.10 0.340 0.009 12.527 12.227
2457244.50089 0.00 26.67 26.80 0.228 0.009 26.430 0.137
2457244.49696 0.00 24.24 16.65 -0.056 0.002 24.290 -7.584
2457244.49461 0.00 7.42 2.29 -0.297 -0.007 7.719 -5.122
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49949 0.00 -11.53 5.78 -0.038 -0.004 -11.489 17.311
2457244.50232 0.00 1.84 19.37 0.221 0.004 1.612 17.533
2457244.50165 0.00 19.84 27.56 0.259 0.008 19.573 7.721
2457244.49814 0.00 24.47 22.16 0.038 0.004 24.433 -2.313
2457244.49530 0.00 11.11 8.57 -0.221 -0.005 11.332 -2.534
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.50041 0.00 -2.13 15.06 0.078 0.000 -2.211 17.194
2457244.50071 0.00 17.41 26.30 0.192 0.006 17.214 8.888
2457244.49683 0.00 17.19 17.44 -0.078 -0.001 17.269 0.253
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49975 0.00 14.20 23.86 0.115 0.004 14.085 9.656
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49805 0.00 6.84 16.77 -0.034 -0.001 6.874 9.935
"""
vhs_iraf = []
for line in rvcorr_result.strip().split('\n'):
if not line.strip().startswith('#'):
vhs_iraf.append(float(line.split()[2]))
vhs_iraf = vhs_iraf*u.km/u.s
targets = SkyCoord(_get_test_input_radecs(), obstime=test_input_time,
location=test_input_loc)
vhs_astropy = targets.radial_velocity_correction('heliocentric')
assert_quantity_allclose(vhs_astropy, vhs_iraf, atol=150*u.m/u.s)
    return vhs_astropy, vhs_iraf # for interactive examination
def generate_IRAF_input(writefn=None):
dt = test_input_time.utc.datetime
coos = _get_test_input_radecs()
lines = []
for ra, dec in zip(coos.ra, coos.dec):
rastr = Angle(ra).to_string(u.hour, sep=':')
decstr = Angle(dec).to_string(u.deg, sep=':')
msg = '{yr} {mo} {day} {uth}:{utmin} {ra} {dec}'
lines.append(msg.format(yr=dt.year, mo=dt.month, day=dt.day,
uth=dt.hour, utmin=dt.minute,
ra=rastr, dec=decstr))
if writefn:
with open(writefn, 'w') as f:
for l in lines:
f.write(l)
else:
for l in lines:
print(l)
print('Run IRAF as:\nastutil\nrvcorrect f=<filename> observatory=Paranal')
def _get_test_input_radecs():
ras = []
decs = []
for dec in np.linspace(-85, 85, 15):
nra = int(np.round(10*np.cos(dec*u.deg)).value)
ras1 = np.linspace(-180, 180-1e-6, nra)
ras.extend(ras1)
decs.extend([dec]*len(ras1))
return SkyCoord(ra=ras, dec=decs, unit=u.deg)
def test_barycorr():
# this is the result of calling _get_barycorr_bvcs
barycorr_bvcs = u.Quantity([
-10335.93326096, -14198.47605491, -2237.60012494, -14198.47595363,
-17425.46512587, -17131.70901174, 2424.37095076, 2130.61519166,
-17425.46495779, -19872.50026998, -24442.37091097, -11017.08975893,
6978.0622355 , 11547.93333743, -1877.34772637, -19872.50004258,
-21430.08240017, -27669.14280689, -16917.08506807, 2729.57222968,
16476.49569232, 13971.97171764, -2898.04250914, -21430.08212368,
-22028.51337105, -29301.92349394, -21481.13036199, -3147.44828909,
14959.50065514, 22232.91155425, 14412.11903105, -3921.56359768,
-22028.51305781, -21641.01479409, -29373.0512649 , -24205.90521765,
-8557.34138828, 10250.50350732, 23417.2299926 , 24781.98057941,
13706.17339044, -4627.70005932, -21641.01445812, -20284.92627505,
-28193.91696959, -22908.51624166, -6901.82132125, 12336.45758056,
25804.51614607, 27200.50029664, 15871.21385688, -2882.24738355,
-20284.9259314 , -18020.92947805, -25752.96564978, -20585.81957567,
-4937.25573801, 13870.58916957, 27037.31568441, 28402.06636994,
17326.25977035, -1007.62209045, -18020.92914212, -14950.33284575,
-22223.74260839, -14402.94943965, 3930.73265119, 22037.68163353,
29311.09265126, 21490.30070307, 3156.62229843, -14950.33253252,
-11210.53846867, -17449.59867676, -6697.54090389, 12949.11642965,
26696.03999586, 24191.5164355 , 7321.50355488, -11210.53819218,
-6968.89359681, -11538.76423011, 1886.51695238, 19881.66902396,
24451.54039956, 11026.26000765, -6968.89336945, -2415.20201758,
-2121.44599781, 17434.63406085, 17140.87871753, -2415.2018495 ,
2246.76923076, 14207.64513054, 2246.76933194, 6808.40787728],
u.m/u.s)
# this tries the *other* way of calling radial_velocity_correction relative
# to the IRAF tests
targets = _get_test_input_radecs()
bvcs_astropy = targets.radial_velocity_correction(obstime=test_input_time,
location=test_input_loc,
kind='barycentric')
assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=5*u.m/u.s)
    return bvcs_astropy, barycorr_bvcs # for interactive examination
def _get_barycorr_bvcs(coos, loc, injupyter=False):
"""
Gets the barycentric correction of the test data from the
http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site.
Requires the https://github.com/tronsgaard/barycorr python interface to that
site.
Provided to reproduce the test data above, but not required to actually run
the tests.
"""
import barycorr
from ...utils.console import ProgressBar
bvcs = []
for ra, dec in ProgressBar(list(zip(coos.ra.deg, coos.dec.deg)),
ipython_widget=injupyter):
res = barycorr.bvc(test_input_time.utc.jd, ra, dec,
lat=loc.geodetic[1].deg,
lon=loc.geodetic[0].deg,
elevation=loc.geodetic[2].to(u.m).value)
bvcs.append(res)
return bvcs*u.m/u.s
def test_rvcorr_multiple_obstimes_onskycoord():
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
arrtime = Time('2005-03-21 00:00:00') + np.linspace(-1, 1, 10)*u.day
sc = SkyCoord(1*u.deg, 2*u.deg, 100*u.kpc, obstime=arrtime, location=loc)
rvcbary_sc2 = sc.radial_velocity_correction(kind='barycentric')
assert len(rvcbary_sc2) == 10
# check the multiple-obstime and multi- mode
sc = SkyCoord(([1]*10)*u.deg, 2*u.deg, 100*u.kpc,
obstime=arrtime, location=loc)
rvcbary_sc3 = sc.radial_velocity_correction(kind='barycentric')
assert len(rvcbary_sc3) == 10
def test_invalid_argument_combos():
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time('2005-03-21 00:00:00')
timel = Time('2005-03-21 00:00:00', location=loc)
scwattrs = SkyCoord(1*u.deg, 2*u.deg, obstime=time, location=loc)
scwoattrs = SkyCoord(1*u.deg, 2*u.deg)
scwattrs.radial_velocity_correction()
with pytest.raises(ValueError):
scwattrs.radial_velocity_correction(obstime=time, location=loc)
with pytest.raises(TypeError):
scwoattrs.radial_velocity_correction(obstime=time)
scwoattrs.radial_velocity_correction(obstime=time, location=loc)
with pytest.raises(TypeError):
scwoattrs.radial_velocity_correction()
with pytest.raises(ValueError):
scwattrs.radial_velocity_correction(timel)
| [
"[email protected]"
] | |
48b78b754e439112fd0edbe53a2f1921e547ce3c | 136a379de74b2a28782cd0e2fb04da99dfabdf86 | /File-Handling/Exercise.py | 8bd12e20853e1bf33850acafdf5a5adf5211b4c0 | [] | no_license | mironmiron3/SoftUni-Python-Advanced | eb6c077c3b94e0381a82ed3b4abb26f1098dec82 | c7ac896a8fcc1f13a09f4c5573bd183d788a3157 | refs/heads/main | 2023-07-09T23:00:18.404835 | 2021-08-24T14:05:21 | 2021-08-24T14:05:21 | 399,486,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | file = open("example.txt")
content1 = file.readline()  # read the first line
content2 = file.readline()  # read the second line
#print(content1)
file.close()  # release the file handle when done
print(content2) | [
"[email protected]"
] | |
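# The idiomatic form of the exercise above is a context manager, which closes
# the handle automatically even if an exception is raised:
with open("example.txt") as f:
    f.readline()         # skip the first line
    print(f.readline())  # print the second line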
1c6a029683af969af9e6686df9c21e1d0165a4b2 | 5e3ebc83bc3fe2f85c34563689b82b1fc8b93a04 | /google/ads/googleads/v5/enums/types/account_budget_proposal_status.py | 775b7b599ba465c2c0fdc70efefd98eefd7eb098 | [
"Apache-2.0"
] | permissive | pdsing/google-ads-python | 0ce70227cd6bb13a25cd13de0ca05c2636279ecd | ee2c059498d5679a0d1d9011f3795324439fad7c | refs/heads/master | 2023-05-04T18:39:57.412453 | 2021-05-21T16:38:17 | 2021-05-21T16:38:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v5.enums",
marshal="google.ads.googleads.v5",
manifest={"AccountBudgetProposalStatusEnum",},
)
class AccountBudgetProposalStatusEnum(proto.Message):
r"""Message describing AccountBudgetProposal statuses."""
class AccountBudgetProposalStatus(proto.Enum):
r"""The possible statuses of an AccountBudgetProposal."""
UNSPECIFIED = 0
UNKNOWN = 1
PENDING = 2
APPROVED_HELD = 3
APPROVED = 4
CANCELLED = 5
REJECTED = 6
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
] | |
07c81e48ef1e0240cf2c4b5ca63eec342824fd44 | 846e8886bbe7e8c3cdee4ba505c2217f1da1d803 | /python/catkin/test_results.py | b3471991e33c50440122cc729b6db58321bb9dd9 | [] | no_license | jamuraa/catkin | ef315aa644459a73443d2a8d74e6e8c0954b47f3 | 91b133d4c2048af097fdea270a0a19c57b422ad0 | refs/heads/master | 2020-11-30T13:03:20.220219 | 2012-10-02T18:54:56 | 2012-10-02T18:54:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | from __future__ import print_function
import os
from xml.etree.ElementTree import ElementTree
def read_junit(filename):
tree = ElementTree()
root = tree.parse(filename)
num_tests = int(root.attrib['tests'])
num_errors = int(root.attrib['errors'])
num_failures = int(root.attrib['failures'])
return (num_tests, num_errors, num_failures)
def test_results(test_results_dir):
results = {}
for dirpath, dirnames, filenames in os.walk(test_results_dir):
# do not recurse into folders starting with a dot
dirnames[:] = [d for d in dirnames if not d.startswith('.')]
for filename in [f for f in filenames if f.endswith('.xml')]:
filename_abs = os.path.join(dirpath, filename)
name = filename_abs[len(test_results_dir) + 1:]
try:
num_tests, num_errors, num_failures = read_junit(filename_abs)
except Exception as e:
print('Skipping "%s": %s' % (name, str(e)))
continue
results[name] = (num_tests, num_errors, num_failures)
return results
def print_summary(results, show_stable=False, show_unstable=True):
sum_tests = sum_errors = sum_failures = 0
for name in sorted(results.keys()):
(num_tests, num_errors, num_failures) = results[name]
sum_tests += num_tests
sum_errors += num_errors
sum_failures += num_failures
if show_stable and not num_errors and not num_failures:
print('%s: %d tests' % (name, num_tests))
if show_unstable and (num_errors or num_failures):
print('%s: %d tests, %d errors, %d failures' % (name, num_tests, num_errors, num_failures))
print('Summary: %d tests, %d errors, %d failures' % (sum_tests, sum_errors, sum_failures))
| [
"[email protected]"
] | |
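# A minimal usage sketch for the helpers above; 'build/test_results' is a
# hypothetical directory containing the JUnit XML files written by a catkin run.
results = test_results('build/test_results')
print_summary(results, show_stable=False, show_unstable=True)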
810af9acd051bb92282777ed5159e2d3bea725ea | 471b5d4df7c92af540c3d348594cc6ea98d65fed | /dojo_python/flask/survey/survey.py | d3be0e91dea895719a61bdd25aa6ec76be766ca5 | [] | no_license | samuellly/dojo_assignment_file | 929c6d747077b47b35179f190075b1d9a54e257c | 37363982238fa7591a139a3af9beb20a8e165997 | refs/heads/master | 2021-01-13T05:30:02.462066 | 2017-05-20T00:27:47 | 2017-05-20T00:27:47 | 80,334,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/result', methods=['POST'])
def result():
print ("Info received!")
return render_template('result.html', name = request.form['name'], location = request.form['location'], language = request.form['language'], comment = request.form['comment'])
app.run(debug=True)
| [
"[email protected]"
] | |
b029dde505319423c857d3ae2b468e2b48f9ea6d | 543286f4fdefe79bd149ff6e103a2ea5049f2cf4 | /Exercicios&cursos/eXcript/Aula 18 - Propriedade Sticky.py | 505b6762f67eaf3c58b626f5f1c71a0c1459ee06 | [] | no_license | antonioleitebr1968/Estudos-e-Projetos-Python | fdb0d332cc4f12634b75984bf019ecb314193cc6 | 9c9b20f1c6eabb086b60e3ba1b58132552a84ea6 | refs/heads/master | 2022-04-01T20:03:12.906373 | 2020-02-13T16:20:51 | 2020-02-13T16:20:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | #width == largura
# height == altura, width == largura (Portuguese glosses of the size options)
from tkinter import *
janela = Tk()
lb1 = Label(janela, text="ESPAÇO", width=15, height=3, bg="blue")
lbHORIZONTAL = Label(janela, text="HORIZONTAL", bg="yellow")
lbVERTICAL = Label(janela, text="VERTICAL", bg="yellow")
lb1.grid(row=0, column=0)
lbHORIZONTAL.grid(row=1, column=0, sticky=E)
lbVERTICAL.grid(row=0, column=1, sticky=S)
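# sticky anchors a widget to an edge of its grid cell using compass constants
# N/S/E/W (combinable, e.g. N+S stretches vertically); the default is centered.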
janela.geometry("200x200+100+100")
janela.mainloop()
| [
"[email protected]"
] | |
6c173948ffd1b8a67bce3a68d009815fc750f195 | 219634e73b1b861177fcd49c3d2fca0cfa00604e | /prev_project/crawl.py | 9e0e13fc4477feac92b913f1efb0701ddc66d3a3 | [
"MIT"
] | permissive | dongkoull/BigData-project | 10e2ee88c62981feffc496d309fd8140b8bc4cb4 | f6cd9b873a1ce7b1133f653d9b8f0e08c4ffd87d | refs/heads/master | 2020-03-31T08:23:05.035223 | 2018-10-05T05:59:07 | 2018-10-05T05:59:07 | 152,054,514 | 0 | 0 | MIT | 2020-12-13T08:29:27 | 2018-10-08T09:37:49 | Jupyter Notebook | UTF-8 | Python | false | false | 80 | py | '''
Crawling workspace
Naver, Daum, Blog, News
'''
from bs4 import BeautifulSoup
s | [
"[email protected]"
] | |
2056295116744d61aff23b37cb126feb78904a4e | 863a56f99b4668211b96d66e3d2698196e46f3b1 | /prng/cellular_automata/rule198/run.py | 309e1673dd75c013f3267dff98a2899f99f68d8b | [
"LicenseRef-scancode-public-domain"
] | permissive | atoponce/scripts | 15b958463d6e788ad6f7785d2614ddb372fc69a7 | b2c8fd2a0b68e83562570c315f4c9596ee546011 | refs/heads/master | 2023-04-28T05:47:07.918556 | 2023-04-15T15:02:05 | 2023-04-15T15:02:05 | 8,612,257 | 22 | 4 | null | 2016-12-22T19:21:28 | 2013-03-06T20:18:33 | Shell | UTF-8 | Python | false | false | 643 | py | #!/usr/bin/python3
#seed = '00000000000000000100000000000000000' # textbook initial state
seed = '01011111110010010011010001100100010' # random initial state
bits = len(seed)
for n in range(5000):
print(int(seed, 2)/2**bits)
state = ''
p, q, r = -1, 0, 1
for n in range(bits): # there must be a more efficient way to do this
state += str(
#(int(seed[p])&int(seed[r]))^int(seed[q])^int(seed[r]) # boolean
(int(seed[q])+int(seed[r])+int(seed[p])*int(seed[r])) % 2 # algebraic
) # rule 198
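        # Wolfram rule 198 = 11000110 in binary: neighborhoods 111..000
        # map to outputs 1,1,0,0,0,1,1,0, which both formulas reproduce.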
p = (p + 1) % bits
q = (q + 1) % bits
r = (r + 1) % bits
seed = state
| [
"[email protected]"
] | |
502d9190cab58f6d069d44d54fb6d2e1eda3cf9e | a3181f8b0c3c22f9a24ac7e688502296b1f39386 | /finmarketpy/curve/fxforwardscurve.py | 0615f3ba2d1a6b842962d0fa5512433ec4b7de31 | [
"Apache-2.0"
] | permissive | pyzeon/finmarketpy | 656ef1ebcd2b0dd2247681e10685675deb8ce118 | f3dcd7a3b8cbdc91ac30e1e2e498e3f0acb3b097 | refs/heads/master | 2023-04-04T04:51:44.114098 | 2021-04-13T15:06:01 | 2021-04-13T15:06:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,129 | py | __author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import numpy as np
import pandas as pd
from pandas.tseries.offsets import CustomBusinessDay, CustomBusinessMonthEnd
from findatapy.market import Market, MarketDataRequest
from findatapy.timeseries import Calculations, Calendar, Filter
from findatapy.util.dataconstants import DataConstants
from findatapy.util.fxconv import FXConv
from finmarketpy.curve.rates.fxforwardspricer import FXForwardsPricer
from finmarketpy.util.marketconstants import MarketConstants
data_constants = DataConstants()
market_constants = MarketConstants()
class FXForwardsCurve(object):
"""Constructs continuous forwards time series total return indices from underlying forwards contracts.
"""
def __init__(self, market_data_generator=None, fx_forwards_trading_tenor=market_constants.fx_forwards_trading_tenor,
roll_days_before=market_constants.fx_forwards_roll_days_before,
roll_event=market_constants.fx_forwards_roll_event, construct_via_currency='no',
fx_forwards_tenor_for_interpolation=market_constants.fx_forwards_tenor_for_interpolation,
base_depos_tenor=data_constants.base_depos_tenor,
roll_months=market_constants.fx_forwards_roll_months,
cum_index=market_constants.fx_forwards_cum_index,
output_calculation_fields=market_constants.output_calculation_fields,
field='close'):
"""Initializes FXForwardsCurve
Parameters
----------
market_data_generator : MarketDataGenerator
Used for downloading market data
fx_forwards_trading_tenor : str
What is primary forward contract being used to trade (default - '1M')
roll_days_before : int
Number of days before roll event to enter into a new forwards contract
roll_event : str
What constitutes a roll event? ('month-end', 'quarter-end', 'year-end', 'expiry')
construct_via_currency : str
What currency should we construct the forward via? Eg. if we asked for AUDJPY we can construct it via
AUDUSD & JPYUSD forwards, as opposed to AUDJPY forwards (default - 'no')
fx_forwards_tenor_for_interpolation : str(list)
Which forwards should we use for interpolation
base_depos_tenor : str(list)
Which base deposits tenors do we need (this is only necessary if we want to start inferring depos)
roll_months : int
After how many months should we initiate a roll. Typically for trading 1M this should 1, 3M this should be 3
etc.
cum_index : str
In total return index, do we compute in additive or multiplicative way ('add' or 'mult')
output_calculation_fields : bool
Also output additional data should forward expiries etc. alongside total returns indices
"""
self._market_data_generator = market_data_generator
self._calculations = Calculations()
self._calendar = Calendar()
self._filter = Filter()
self._fx_forwards_trading_tenor = fx_forwards_trading_tenor
self._roll_days_before = roll_days_before
self._roll_event = roll_event
self._construct_via_currency = construct_via_currency
self._fx_forwards_tenor_for_interpolation = fx_forwards_tenor_for_interpolation
self._base_depos_tenor = base_depos_tenor
self._roll_months = roll_months
self._cum_index = cum_index
        self._output_calculation_fields = output_calculation_fields
self._field = field
def generate_key(self):
from findatapy.market.ioengine import SpeedCache
# Don't include any "large" objects in the key
return SpeedCache().generate_key(self, ['_market_data_generator', '_calculations', '_calendar', '_filter'])
def fetch_continuous_time_series(self, md_request, market_data_generator, fx_forwards_trading_tenor=None,
roll_days_before=None, roll_event=None,
construct_via_currency=None, fx_forwards_tenor_for_interpolation=None, base_depos_tenor=None,
roll_months=None, cum_index=None, output_calculation_fields=False, field=None):
if market_data_generator is None: market_data_generator = self._market_data_generator
if fx_forwards_trading_tenor is None: fx_forwards_trading_tenor = self._fx_forwards_trading_tenor
if roll_days_before is None: roll_days_before = self._roll_days_before
if roll_event is None: roll_event = self._roll_event
if construct_via_currency is None: construct_via_currency = self._construct_via_currency
if fx_forwards_tenor_for_interpolation is None: fx_forwards_tenor_for_interpolation = self._fx_forwards_tenor_for_interpolation
if base_depos_tenor is None: base_depos_tenor = self._base_depos_tenor
if roll_months is None: roll_months = self._roll_months
if cum_index is None: cum_index = self._cum_index
        if output_calculation_fields is None: output_calculation_fields = self._output_calculation_fields
if field is None: field = self._field
# Eg. we construct EURJPY via EURJPY directly (note: would need to have sufficient forward data for this)
if construct_via_currency == 'no':
# Download FX spot, FX forwards points and base depos etc.
market = Market(market_data_generator=market_data_generator)
md_request_download = MarketDataRequest(md_request=md_request)
fx_conv = FXConv()
# CAREFUL: convert the tickers to correct notation, eg. USDEUR => EURUSD, because our data
# should be fetched in correct convention
md_request_download.tickers = [fx_conv.correct_notation(x) for x in md_request.tickers]
md_request_download.category = 'fx-forwards-market'
md_request_download.fields = field
md_request_download.abstract_curve = None
md_request_download.fx_forwards_tenor = fx_forwards_tenor_for_interpolation
md_request_download.base_depos_tenor = base_depos_tenor
forwards_market_df = market.fetch_market(md_request_download)
# Now use the original tickers
return self.construct_total_return_index(md_request.tickers, forwards_market_df,
fx_forwards_trading_tenor=fx_forwards_trading_tenor,
roll_days_before=roll_days_before, roll_event=roll_event,
fx_forwards_tenor_for_interpolation=fx_forwards_tenor_for_interpolation,
roll_months=roll_months,
cum_index=cum_index,
output_calculation_fields=output_calculation_fields,
field=field)
else:
# eg. we calculate via your domestic currency such as USD, so returns will be in your domestic currency
# Hence AUDJPY would be calculated via AUDUSD and JPYUSD (subtracting the difference in returns)
total_return_indices = []
for tick in md_request.tickers:
base = tick[0:3]
terms = tick[3:6]
md_request_base = MarketDataRequest(md_request=md_request)
md_request_base.tickers = base + construct_via_currency
md_request_terms = MarketDataRequest(md_request=md_request)
md_request_terms.tickers = terms + construct_via_currency
# Construct the base and terms separately (ie. AUDJPY => AUDUSD & JPYUSD)
base_vals = self.fetch_continuous_time_series(md_request_base, market_data_generator,
fx_forwards_trading_tenor=fx_forwards_trading_tenor,
roll_days_before=roll_days_before, roll_event=roll_event,
fx_forwards_tenor_for_interpolation=fx_forwards_tenor_for_interpolation,
base_depos_tenor=base_depos_tenor,
roll_months=roll_months, output_calculation_fields=False,
cum_index=cum_index,
construct_via_currency='no',
field=field)
terms_vals = self.fetch_continuous_time_series(md_request_terms, market_data_generator,
fx_forwards_trading_tenor=fx_forwards_trading_tenor,
roll_days_before=roll_days_before, roll_event=roll_event,
fx_forwards_tenor_for_interpolation=fx_forwards_tenor_for_interpolation,
base_depos_tenor=base_depos_tenor,
roll_months=roll_months,
cum_index=cum_index,
output_calculation_fields=False,
construct_via_currency='no',
field=field)
# Special case for USDUSD case (and if base or terms USD are USDUSD
if base + terms == construct_via_currency + construct_via_currency:
base_rets = self._calculations.calculate_returns(base_vals)
cross_rets = pd.DataFrame(0, index=base_rets.index, columns=base_rets.columns)
elif base + construct_via_currency == construct_via_currency + construct_via_currency:
cross_rets = -self._calculations.calculate_returns(terms_vals)
elif terms + construct_via_currency == construct_via_currency + construct_via_currency:
cross_rets = self._calculations.calculate_returns(base_vals)
else:
base_rets = self._calculations.calculate_returns(base_vals)
terms_rets = self._calculations.calculate_returns(terms_vals)
cross_rets = base_rets.sub(terms_rets.iloc[:, 0], axis=0)
# First returns of a time series will by NaN, given we don't know previous point
cross_rets.iloc[0] = 0
cross_vals = self._calculations.create_mult_index(cross_rets)
cross_vals.columns = [tick + '-forward-tot.' + field]
total_return_indices.append(cross_vals)
return self._calculations.join(total_return_indices, how='outer')
def unhedged_asset_fx(self, assets_df, asset_currency, home_curr, start_date, finish_date, spot_df=None):
pass
def hedged_asset_fx(self, assets_df, asset_currency, home_curr, start_date, finish_date, spot_df=None,
total_return_indices_df=None):
pass
def get_day_count_conv(self, currency):
if currency in market_constants.currencies_with_365_basis:
return 365.0
return 360.0
def construct_total_return_index(self, cross_fx, forwards_market_df,
fx_forwards_trading_tenor=None,
roll_days_before=None,
roll_event=None,
roll_months=None,
fx_forwards_tenor_for_interpolation=None,
cum_index=None,
output_calculation_fields=None,
field=None):
if not (isinstance(cross_fx, list)):
cross_fx = [cross_fx]
if fx_forwards_trading_tenor is None: fx_forwards_trading_tenor = self._fx_forwards_trading_tenor
if roll_days_before is None: roll_days_before = self._roll_days_before
if roll_event is None: roll_event = self._roll_event
if roll_months is None: roll_months = self._roll_months
if fx_forwards_tenor_for_interpolation is None: fx_forwards_tenor_for_interpolation = self._fx_forwards_tenor_for_interpolation
if cum_index is None: cum_index = self._cum_index
if field is None: field = self._field
total_return_index_df_agg = []
# Remove columns where there is no data (because these points typically aren't quoted)
forwards_market_df = forwards_market_df.dropna(how='all', axis=1)
fx_forwards_pricer = FXForwardsPricer()
def get_roll_date(horizon_d, delivery_d, asset_hols, month_adj=1):
if roll_event == 'month-end':
roll_d = horizon_d + CustomBusinessMonthEnd(roll_months + month_adj, holidays=asset_hols)
elif roll_event == 'delivery-date':
roll_d = delivery_d
return (roll_d - CustomBusinessDay(n=roll_days_before, holidays=asset_hols))
for cross in cross_fx:
# Eg. if we specify USDUSD
if cross[0:3] == cross[3:6]:
total_return_index_df_agg.append(
pd.DataFrame(100, index=forwards_market_df.index, columns=[cross + "-forward-tot.close"]))
else:
# Is the FX cross in the correct convention
old_cross = cross
cross = FXConv().correct_notation(cross)
horizon_date = forwards_market_df.index
delivery_date = []
roll_date = []
new_trade = np.full(len(horizon_date), False, dtype=bool)
asset_holidays = self._calendar.get_holidays(cal=cross)
# Get first delivery date
delivery_date.append(
self._calendar.get_delivery_date_from_horizon_date(horizon_date[0],
fx_forwards_trading_tenor, cal=cross, asset_class='fx')[0])
# For first month want it to expire within that month (for consistency), hence month_adj=0 ONLY here
roll_date.append(get_roll_date(horizon_date[0], delivery_date[0], asset_holidays, month_adj=0))
# New trade => entry at beginning AND on every roll
new_trade[0] = True
# Get all the delivery dates and roll dates
# At each "roll/trade" day we need to reset them for the new contract
for i in range(1, len(horizon_date)):
# If the horizon date has reached the roll date (from yesterday), we're done, and we have a
# new roll/trade
if (horizon_date[i] - roll_date[i-1]).days == 0:
new_trade[i] = True
# else:
# new_trade[i] = False
# If we're entering a new trade/contract, we need to get new delivery and roll dates
if new_trade[i]:
delivery_date.append(self._calendar.get_delivery_date_from_horizon_date(horizon_date[i],
fx_forwards_trading_tenor, cal=cross, asset_class='fx')[0])
roll_date.append(get_roll_date(horizon_date[i], delivery_date[i], asset_holidays))
else:
# Otherwise use previous delivery and roll dates, because we're still holding same contract
delivery_date.append(delivery_date[i-1])
roll_date.append(roll_date[i-1])
interpolated_forward = fx_forwards_pricer.price_instrument(cross, horizon_date, delivery_date, market_df=forwards_market_df,
fx_forwards_tenor_for_interpolation=fx_forwards_tenor_for_interpolation)[cross + '-interpolated-outright-forward.' + field].values
# To record MTM prices
mtm = np.copy(interpolated_forward)
# Note: may need to add discount factor when marking to market forwards?
# Special case: for very first trading day
# mtm[0] = interpolated_forward[0]
# On rolling dates, MTM will be the previous forward contract (interpolated)
# otherwise it will be the current forward contract
for i in range(1, len(horizon_date)):
if new_trade[i]:
mtm[i] = fx_forwards_pricer.price_instrument(cross, horizon_date[i], delivery_date[i-1],
market_df=forwards_market_df,
fx_forwards_tenor_for_interpolation=fx_forwards_tenor_for_interpolation) \
[cross + '-interpolated-outright-forward.' + field].values
# else:
# mtm[i] = interpolated_forward[i]
# Eg. if we asked for USDEUR, we first constructed spot/forwards for EURUSD
# and then need to invert it
if old_cross != cross:
mtm = 1.0 / mtm
interpolated_forward = 1.0 / interpolated_forward
forward_rets = mtm / np.roll(interpolated_forward, 1) - 1.0
forward_rets[0] = 0
if cum_index == 'mult':
cum_rets = 100 * np.cumprod(1.0 + forward_rets)
elif cum_index == 'add':
cum_rets = 100 + 100 * np.cumsum(forward_rets)
total_return_index_df = pd.DataFrame(index=horizon_date, columns=[cross + "-forward-tot." + field])
total_return_index_df[cross + "-forward-tot." + field] = cum_rets
if output_calculation_fields:
total_return_index_df[cross + '-interpolated-outright-forward.' + field] = interpolated_forward
total_return_index_df[cross + '-mtm.close'] = mtm
total_return_index_df[cross + '-roll.close'] = new_trade
total_return_index_df[cross + '.roll-date'] = roll_date
total_return_index_df[cross + '.delivery-date'] = delivery_date
total_return_index_df[cross + '-forward-return.' + field] = forward_rets
total_return_index_df_agg.append(total_return_index_df)
return self._calculations.join(total_return_index_df_agg, how='outer')
| [
"[email protected]"
] | |
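# A minimal usage sketch for FXForwardsCurve above. The data source, cut, dates
# and tickers are illustrative assumptions, not the library's only options; the
# abstract_curve attribute is the hook the class itself clears before download.
from findatapy.market import Market, MarketDataRequest, MarketDataGenerator

md_request = MarketDataRequest(
    start_date='01 Jan 2020', finish_date='01 Jun 2020',
    data_source='bloomberg', cut='NYC', category='fx',
    tickers=['EURUSD'],
    abstract_curve=FXForwardsCurve(fx_forwards_trading_tenor='1M'))

df = Market(market_data_generator=MarketDataGenerator()).fetch_market(md_request)
print(df.tail())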
946fd49ed7af083f41429c81ef2bb5819af47060 | 9a0a4e1f843d1457c4f466c05c994f3e6ecd842a | /change_transparency.py | 543c49f2aa8d0ef476010ab9f243970f94d0c354 | [] | no_license | sjbrown/steam_jet_blower | 688aa44e43ea8a285ebaf3923473b4a4049b5537 | 5b894354cb60b5d5d6eee74af77140af641580ee | refs/heads/master | 2021-01-10T03:19:21.853486 | 2016-03-18T20:27:49 | 2016-03-18T20:27:49 | 54,229,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | #!/usr/bin/env python
#Import Modules
import pygame
from pygame.locals import *
_cachedOriginals = {}
_cachedCalculatedArrays = {}
#-----------------------------------------------------------------------------
def change_alpha_mult(img, percentAlpha):
global _cachedOriginals
global _cachedCalculatedArrays
if percentAlpha < 0 or percentAlpha > 100 or type(percentAlpha) != int:
raise Exception( "percentAlpha not an int between 0 and 100" )
floatAlpha = float(percentAlpha) / 100
alphaArray = pygame.surfarray.pixels_alpha( img )
if not _cachedOriginals.has_key( id(img) ):
origArray = alphaArray
_cachedOriginals[id(img)] = alphaArray[:]
else:
origArray = _cachedOriginals[id(img)]
key = ( percentAlpha, id(img) )
if _cachedCalculatedArrays.has_key( key ):
alphaArray = _cachedCalculatedArrays[ key ][:]
else:
for i in xrange( len(alphaArray) ):
alphaArray[i] = [ floatAlpha*x for x in origArray[i] ]
_cachedCalculatedArrays[ key ] = alphaArray[:]
del alphaArray #this unlocks the surface
#this calls the 'main' function when this script is executed
if __name__ == '__main__': print "didn't expect that!"
| [
"[email protected]"
] | |
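# A usage sketch for change_alpha_mult (hypothetical asset path). The surface
# needs per-pixel alpha, otherwise pygame.surfarray.pixels_alpha() raises:
#
# img = pygame.image.load('sprite.png').convert_alpha()
# change_alpha_mult(img, 50)  # scale every pixel's alpha to 50% of the original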
7c89b5a70eaa41d0b10e26ac6461585729c21d14 | 05b80d92bb2efec76f898c527cc803f931031266 | /Blind 75/Programs/Longest Repeating Character Replacement.py | 1d5251bb53822143571115ed0129d2c93426ce21 | [] | no_license | PriyankaKhire/ProgrammingPracticePython | b5a6af118f3d4ec19de6fcccb7933d84f7522d1a | 8dd152413dce2df66957363ff85f0f4cefa836e8 | refs/heads/master | 2022-08-28T00:44:34.595282 | 2022-08-12T19:08:32 | 2022-08-12T19:08:32 | 91,215,578 | 18 | 11 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | # Longest Repeating Character Replacement
# https://leetcode.com/problems/longest-repeating-character-replacement/
# Solution approach understood from:
# https://leetcode.com/problems/longest-repeating-character-replacement/discuss/358879/Java-Solution-Explained-and-Easy-to-Understand-for-Interviews
'''
formula: a window stays valid while (window length - count of its most frequently occurring character) <= k
'''
class Solution(object):
def addToHashMap(self, letter, hashMap):
if (letter not in hashMap):
hashMap[letter] = 0
hashMap[letter] = hashMap[letter] + 1
#print hashMap
def characterReplacement(self, s, k):
# key: character; value: count
hashMap = {}
start = 0
maxOccurringCharCount = 0
longestLength = 0
for end in range(len(s)):
#print "start", start, "end", end
#print "longestLength", longestLength
self.addToHashMap(s[end], hashMap)
# if the current letter is most frequently occurring then update the count.
maxOccurringCharCount = max(maxOccurringCharCount, hashMap[s[end]])
# get the length of current substring
substringLength = (end - start)+1
if((substringLength - maxOccurringCharCount) <= k):
longestLength = max(longestLength, substringLength)
else:
# since the character at start is no longer in our window
hashMap[s[start]] = hashMap[s[start]] - 1
start = start + 1
return longestLength
"""
:type s: str
:type k: int
:rtype: int
"""
| [
"[email protected]"
] | |
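# Quick check with the classic example: for s = "AABABBA", k = 1 the best
# windows are "AABA" or "BABB" (one replacement each), so this prints 4.
print(Solution().characterReplacement("AABABBA", 1))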
6f62aac4b432ea6c0ddfaf845217dc767679d71f | 12d1bcb4bb0a473d163048f1c5ac9eef6389bc24 | /HypothesisTesting/Quiz.py | d386378049509604a12e023d6c89890c25f5779e | [] | no_license | Bharadwaja92/DataScienceProjects | 339795c08c4b631006f1602ec84f3b48b828e538 | 088305387339affa662ac3d88ea5fac2651295b5 | refs/heads/master | 2020-03-29T19:23:58.041782 | 2019-01-29T12:22:03 | 2019-01-29T12:22:03 | 150,261,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | """"""
"""
Which of these is an accurate statement of the Central Limit Theorem?
For a large enough sample size, our sample mean will be sufficiently close to the population mean.
What is a statistical hypothesis test?
A way of quantifying the truth of a statement.
Which of the following describes a Type II error? False negative
A survey on preferred ice cream flavors not establishing a clear favorite when the majority of people prefer chocolate.
What is a p-value?
    In a hypothesis test, a p-value is the probability of observing data at least as extreme as the sample, assuming the null hypothesis is true.
Suppose we were exploring the relationship between local honey and allergies.
Which of these would be a statement of the null hypothesis?
Local honey has no effect on allergies, any relationship between consuming local honey and allergic outbreaks is due to chance.
Which of these describes a sample mean?
The mean of a subset of our population which is hopefully, but not necessarily, representative of the overall average.
Which of the following hypothesis tests would be used to compare two sets of numerical data?
2 Sample T-Test
* Analysis of variance is used to determine if three or more numerical samples come from the same population.
"""
| [
"[email protected]"
] | |
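# Illustration of the 2-sample t-test named in the last quiz answer; the two
# samples below are made-up numbers.
from scipy.stats import ttest_ind
group_a = [12.1, 11.8, 12.4, 12.0, 11.9]
group_b = [12.6, 12.9, 12.5, 13.1, 12.8]
tstat, pval = ttest_ind(group_a, group_b)
print(tstat, pval)  # reject the null hypothesis when pval falls below alpha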
b508232586963bd3703658b87b4854b11d1c3e75 | fc3f784c8d00f419b11cbde660fe68a91fb080ca | /algoritm/20상반기 코딩테스트/한수/bj1065.py | b711cbf86b44c09064fe63cda2dc9461a9d7b1d7 | [] | no_license | choo0618/TIL | 09f09c89c8141ba75bf92657ac39978913703637 | 70437a58015aecee8f3d86e6bfd0aa8dc11b5447 | refs/heads/master | 2021-06-25T07:01:34.246642 | 2020-12-21T04:57:13 | 2020-12-21T04:57:13 | 163,782,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | import sys
sys.stdin = open('bj1065.txt','r')
N=int(input())
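# Every 1- and 2-digit number is a "hansu" (its digits form an arithmetic
# sequence), so for N < 100 the answer is simply N; otherwise the count starts
# at 99 and each 3-digit number is tested via its digit differences (a-b == b-c).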
if N<100:print(N)
else:
R=0
for i in range(100,N+1):
a,b,c=i//100,(i%100)//10,i%10
if a-b==b-c:R+=1
print(99+R) | [
"[email protected]"
] | |
263feec81bd5161ad7aca3304939729b59c6e0f5 | 6e466112c3682338ec56c892c883284704fbb727 | /bflib/restrictions/weapons.py | e21e12d074b299dcaffacd3c90e51a5f8e5dbcfd | [
"MIT"
] | permissive | ChrisLR/BFLib | 5aee153aeaef72516f737abf74cf89e7ec1cb90a | 2af49cc113792c4967c0c8c5bf32a1b76876e6e2 | refs/heads/master | 2021-01-22T17:52:58.790057 | 2017-11-15T17:46:56 | 2017-11-15T17:46:56 | 102,407,112 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from bflib.keywords.weapons import WeaponWieldKeyword
from bflib.restrictions.base import Restriction
class WeaponRestrictionSet(Restriction):
__slots__ = ["included", "excluded"]
def __init__(self, included=None, excluded=None):
self.included = included
self.excluded = excluded
class WeaponSizeRestrictionSet(Restriction):
__slots__ = ["large", "medium", "small"]
keywords = WeaponWieldKeyword
def __init__(self, large=keywords.CanWield, medium=keywords.CanWield, small=keywords.CanWield):
self.large = large
self.medium = medium
self.small = small
| [
"[email protected]"
] | |
581eb71ed8e3a43f72e7d7c856a6ef0ca4273774 | a78b1c41fc038703e58d5249a9948fbfd06f8159 | /code_nodeperturbation/FM4/sim2/gene/gene.py | 47d12a4d13f3c51625eb54494462cfc38ce251d7 | [] | no_license | le-chang/DISC1_interactome | 15ed1025048e49d5bb6b6bd13eac4f148fe83d04 | b517309b8583358220c2a639d4ef5d303bfb0acd | refs/heads/master | 2021-02-13T21:00:20.418928 | 2019-04-24T13:59:50 | 2019-04-24T13:59:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,652 | py | """
Migration simulator
It is also a demonstration on how the collector works
"""
import boolean2
from boolean2 import Model, util
from random import choice
# occasionally randomized nodes
TARGETS = set( "Migration".split() )
def new_getvalue( state, name, p):
"""
Called every time a node value is used in an expression.
It will override the value for the current step only.
Returns random values for the node states
"""
global TARGETS
value = util.default_get_value( state, name, p )
if name in TARGETS:
# pick at random from True, False and original value
return choice( [True, False, value] )
else:
return value
def run( text, nodes, repeat, steps ):
"""
Runs the simulation and collects the nodes into a collector,
a convenience class that can average the values that it collects.
"""
coll = util.Collector()
for i in xrange( repeat ):
engine = Model( mode='async', text=text )
engine.RULE_GETVALUE = new_getvalue
# minimalist initial conditions, missing nodes set to false
engine.initialize( missing=util.false )
engine.iterate( steps=steps)
coll.collect( states=engine.states, nodes=nodes )
print '- completed'
avgs = coll.get_averages( normalize=True )
return avgs
if __name__ == '__main__':
# read in the text
text = file( 'sim2.txt').read()
# the nodes of interest that are collected over the run
# NODES = 'Apoptosis STAT3 FasL Ras'.split()
# this collects the state of all nodes
NODES = boolean2.all_nodes( text )
#
# raise this for better curves (will take about 2 seconds per repeat)
# plots were made for REPEAT = 1000, STEPS=150
#
REPEAT = 1000
STEPS = 150
data = []
print '- starting simulation with REPEAT=%s, STEPS=%s' % (REPEAT, STEPS)
    # perturb each gene of interest: overexpressed (turned on) first,
    # then knocked out (turned off), appending one averaged run per case
    genes = ['APP', 'DAB1', 'DISC1', 'NDEL1', 'PAFAH1B1']
    for gene in genes:
        mtext = boolean2.modify_states( text=text, turnon=[gene] )
        avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
        data.append( avgs )
    for gene in genes:
        mtext = boolean2.modify_states( text=text, turnoff=[gene] )
        avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
        data.append( avgs )
fname = 'gene.bin'
util.bsave( data, fname=fname )
print '- data saved into %s' % fname
| [
"[email protected]"
] | |
00c0b0cfc6d43856b8c8354dd1095c8801b7699e | 317e68dc7045390f41b10b8aa35d593f93c507d5 | /test/test_cluster_collection.py | 5e158a5a71b90c0e87c505d8f2392831b6de5bad | [] | no_license | daletcoreil/facerecognition-client-python-sdk | e3302b00c4309790db6aad6f111cc86f09152c4a | 50934b0ec247a4005e84652e10f679d3a7652dfb | refs/heads/master | 2021-04-23T08:45:44.475121 | 2020-07-22T12:57:56 | 2020-07-22T12:57:56 | 249,914,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,755 | py | # coding: utf-8
"""
Dalet Media Mediator API
# Scope Dalet Mediator API allows you to submit long running media jobs managed by Dalet services. Long running media jobs include: - **Media processing** such as transcoding or automatic QC. - **Automatic metadata extraction** such as automatic speech transcription or face detection. The Dalet Mediator API is a REST API with typed schema for the payload. # Architecture Job processing is performed on the cloud via dynamic combination of microservices. Dalet Mediator adopts the [EBU MCMA] architecture. The key objectives of this architecture are to support: - Job management and monitoring - Long running transactions - Event based communication pattern - Service registration and discovery - Horizontal scalability in an elastic manner The architecture is implemented using the serverless approach - relying on independent microservices accessible through well documented REST endpoints and sharing a common object model. ## Roles The following services are involved in the processing of media jobs exposed through the Dalet Media Mediator API: - **Mediator**: this is the main entry point to the architecture; this API endpoint supports: 1. Checking authentication using an API key and a token mechanism 2. Verifying quota restrictions before accepting a submitted job 3. Keeping track of usage so that job processing can be tracked and billed 4. Keeping track of jobs metadata as a job repository - **Job Processor**: once a job request is accepted by the mediator, it is assigned to a Job Processor. The Job Processor dispatches the job to an appropriate Job Worker (depending on the job profile and other criteria such as load on the system and cost of operation). It then keeps track of the progress of the job and its status until completion and possible failures and timeout. It reports progress to the Mediator through notifications. - **Job Worker**: The Job Worker performs the actual work on the media object, for example, AI metadata extraction (AME) or essence transcoding. It reports progress to the Job Processor through notifications. - **Service Registry**: The Service Registry keeps track of all active services in the architecture. It is queried by the Mediator and by Processors to discover candidate services to perform jobs. It is updated whenever a new service is launched or stopped. The Service Registry also stores the list of all job profiles supported by one of the Job Workers deployed in the architecture. The Dalet Mediator API abstracts away from the complexity of this orchestration and provides a simple endpoint to submit long running jobs and monitor the progress of their execution. It serves as a facade for the additional technical services for authentication, usage monitoring and service registry. [EBU MCMA]: /https://tech.ebu.ch/groups/mcma 'EBU MCMA' ## Job Lifecycle  ## Authentication To use the Dalet Mediator API - you must obtain an APIKey from Dalet. This key comes in the form of two parameters: * client ID * secret Given these two parameters, a client program must first obtain an access token (GET /auth/access-token) and then associate this token to every subsequent calls. When the token expires, the API will return a 401 error code. In this case, the client must request a new token and resubmit the request. # noqa: E501
The version of the OpenAPI document: 1.4.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import facerecognition_client
from facerecognition_client.models.cluster_collection import ClusterCollection # noqa: E501
from facerecognition_client.rest import ApiException
class TestClusterCollection(unittest.TestCase):
"""ClusterCollection unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ClusterCollection
        include_optional is a boolean: when False only required
params are included, when True both required and
optional params are included """
# model = facerecognition_client.models.cluster_collection.ClusterCollection() # noqa: E501
if include_optional :
return ClusterCollection(
uid = '0',
tenant_id = '0',
project_service_id = '0',
job_id = '0',
created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
created_by = '0',
modified_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
modified_by = '0',
name = '0',
clusters = [
facerecognition_client.models.cluster.Cluster(
uid = '0',
created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
job_id = '0',
identity = '0',
identified_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
identified_by = '0',
curated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
curated_by = '0',
face_ids = [
'0'
], )
]
)
else :
return ClusterCollection(
clusters = [
facerecognition_client.models.cluster.Cluster(
uid = '0',
created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
job_id = '0',
identity = '0',
identified_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
identified_by = '0',
curated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
curated_by = '0',
face_ids = [
'0'
], )
],
)
def testClusterCollection(self):
"""Test ClusterCollection"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
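        # The generated stub only checks that construction succeeds; concrete
        # assertions on the built instances would go here.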
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
28ab99c19eab747771628ecc969b9664add8722c | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc055/A/4872849.py | d058a529a0e1165d93ce465dc51afd09472ccc13 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | N = int(input())
wari = N // 15
goukei = N * 800 - wari * 200
print(goukei) | [
"[email protected]"
] | |
8520cda3561bf8b7c960f8602b2bced270173fa2 | 7c0acdc46cfce5dc116d394f6990ee5ab1c0fa0c | /venv/lib/python3.7/site-packages/builders/logger.py | 2315b5710fc7b44fd1e8f35c660d87e62010e1e9 | [
"MIT"
] | permissive | Vatansever27/ExchangeCode | 84fb4a02371fdda7cd94d00971be76bcd1068be0 | ab284653a337937139a9a28c036efe701fb376c7 | refs/heads/master | 2020-04-07T16:38:59.819929 | 2018-11-21T12:18:30 | 2018-11-21T12:18:30 | 158,537,067 | 0 | 0 | null | 2018-11-21T12:18:31 | 2018-11-21T11:22:14 | null | UTF-8 | Python | false | false | 284 | py | '''
Created on Sep 10, 2013
This module holds logger configuration for builders
@author: pupssman
'''
import logging
logger = logging.getLogger('builders')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.WARN)
logger.addHandler(handler)
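# Net effect: the 'builders' logger accepts INFO and above, but this stream
# handler only prints WARN and above; attach another handler to surface INFO.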
| [
"[email protected]"
] | |
4f122446d7f74b618c9d6df7407213c5b1993795 | 70744b927246edb4cfdc405bd3528513d9ea9ded | /envios/autocomplete_light_registry.py | cd1e842920390dfa7c8bab2e7b617952f2f99540 | [] | no_license | jesusmaherrera/enviamexpaisano | e0616cbba47a4b4bddc897fbf2244d92c59c10fd | dd9e3e8270616a8cb73704dc7076791e36ecc98f | refs/heads/master | 2016-09-06T04:30:19.848954 | 2013-06-07T06:05:27 | 2013-06-07T06:05:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import autocomplete_light
from cities_light.models import City
autocomplete_light.register(City, search_fields=('search_names',),
autocomplete_js_attributes={'placeholder': 'Nombre de la ciudad..'})
autocomplete_light.register(City, search_fields=('name',),
autocomplete_js_attributes={'placeholder': 'Nombre de la ciudad..'})
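# Two lookup channels for City: one matching the precomputed search_names
# field, one matching name alone; the placeholder string is Spanish for
# "City name..".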
| [
"[email protected]"
] | |
be302e0706e2794ea3306d1e0fd8b9e27cb8dd64 | 0eb245b181d0455cb810bd188c0e5607f7702f88 | /impacts/composites_VAR_PV_ninio.py | a9cbc5060d2dc9c2196c65cd382e8f32c41aca0a | [] | no_license | marisolosman/ENSO_SPV_SH_climate | 1d3601cfa793dbce4de5db4f8e9e5c20839bab09 | 7128f0f620698145dbcf69e53c19786973006423 | refs/heads/master | 2023-02-08T13:56:46.220639 | 2020-12-29T20:52:48 | 2020-12-29T20:52:48 | 325,384,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,092 | py | #composites on PV years conditioned on ENSO strength
import sys
import numpy as np
import xarray as xr
import os
import regional_plots
import plots
NAME = sys.argv[1]
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
PATH_DATA = '/pikachu/datos/osman/assessment_SH_zonal_asymmetries/data/'
FIG_PATH = '/pikachu/datos/osman/assessment_SH_zonal_asymmetries/figures/impacts/'
FILE_VAR = NAME + '_s4_aug_feb.nc4'
FILE_NINIO_S4 = 'ninio34_monthly.nc4'
FILE_PV_S4 = 'SPV_index.nc4'
ninio34 = xr.open_dataset(PATH_DATA + FILE_NINIO_S4)
PV_index = xr.open_dataset(PATH_DATA + FILE_PV_S4)
# El Niño years: upper quartile of the Niño 3.4 index; La Niña: lower quartile
index_ninio = ninio34.ninio34_index >= ninio34.ninio34_index.quantile(0.75, dim='dim_0',
interpolation='linear')
index_ninia = ninio34.ninio34_index <= ninio34.ninio34_index.quantile(0.25, dim='dim_0',
interpolation='linear')
# PV intensity during all years
#search for years with weak PV
index_SPV_upper = PV_index.SPV_index >= PV_index.SPV_index.quantile(0.75, dim='dim_0', interpolation='linear')
#search for years with strong PV
index_SPV_lower = PV_index.SPV_index <= PV_index.SPV_index.quantile(0.25, dim='dim_0', interpolation='linear')
# weak/strong SPV during El Niño years
index_WSPV_ninio = np.logical_and(index_ninio.values, index_SPV_upper.values)
index_SSPV_ninio = np.logical_and(index_ninio.values, index_SPV_lower.values)
# weak/strong SPV during La Niña years
index_WSPV_ninia = np.logical_and(index_ninia.values, index_SPV_upper.values)
index_SSPV_ninia = np.logical_and(index_ninia.values, index_SPV_lower.values)
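# Each quartile mask keeps ~25% of the ensemble members; the logical_and
# intersections retain only members sitting in an ENSO phase and an SPV
# quartile at the same time, and the nn_* counts below size each composite.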
nn_WSPV_all = np.sum(index_SPV_upper.values)
nn_SSPV_all = np.sum(index_SPV_lower.values)
nn_WSPV_ninio = np.sum(index_WSPV_ninio)
nn_SSPV_ninio = np.sum(index_SSPV_ninio)
nn_WSPV_ninia = np.sum(index_WSPV_ninia)
nn_SSPV_ninia = np.sum(index_SSPV_ninia)
nn_all = np.shape(ninio34.ninio34_index.values)[0]
month = ['Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb']
seas = ['ASO', 'SON', 'OND', 'NDJ', 'DJF']
VAR = xr.open_dataset(PATH_DATA + FILE_VAR)
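# VAR is expected to carry the S4 field with dims (realiz, month, latitude,
# longitude); the seven months run Aug..Feb as listed in `month` above.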
for i in np.arange(0, 7):
var_WSPV_ninio = np.mean(VAR.isel(month=i, realiz=index_WSPV_ninio, drop=True),
axis=0).to_array().squeeze()
SS_WSPV_ninio = np.var(VAR.isel(month=i, realiz=index_WSPV_ninio, drop=True),
axis=0).to_array().squeeze()/nn_WSPV_ninio
var_SSPV_ninio = np.mean(VAR.isel(month=i, realiz=index_SSPV_ninio, drop=True),
axis=0).to_array().squeeze()
SS_SSPV_ninio = np.var(VAR.isel(month=i, realiz=index_SSPV_ninio, drop=True),
axis=0).to_array().squeeze()/nn_SSPV_ninio
var_WSPV_ninia = np.mean(VAR.isel(month=i, realiz=index_WSPV_ninia, drop=True),
axis=0).to_array().squeeze()
SS_WSPV_ninia = np.var(VAR.isel(month=i, realiz=index_WSPV_ninia, drop=True),
axis=0).to_array().squeeze()/nn_WSPV_ninia
var_SSPV_ninia = np.mean(VAR.isel(month=i, realiz=index_SSPV_ninia, drop=True),
axis=0).to_array().squeeze()
SS_SSPV_ninia = np.var(VAR.isel(month=i, realiz=index_SSPV_ninia, drop=True),
axis=0).to_array().squeeze()/nn_SSPV_ninia
var_WSPV_all = np.mean(VAR.isel(month=i, realiz=index_SPV_upper.values, drop=True),
axis=0).to_array().squeeze()
SS_WSPV_all = np.var(VAR.isel(month=i, realiz=index_SPV_upper.values),
axis=0).to_array().squeeze()/np.sum(index_SPV_upper.values)
var_all = np.mean(VAR.isel(month=i, drop=True), axis=0).to_array().squeeze()
SS_all = np.var(VAR.isel(month=i, drop=True), axis=0).to_array().squeeze() / nn_all
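    # Every SS_* term is a squared standard error (variance / ensemble size);
    # downstream the plot helpers get sqrt(SS_a + SS_b) and n_a + n_b - 2
    # degrees of freedom, the ingredients of a two-sample t test on the
    # composite-mean differences.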
var_SSPV_all = np.mean(VAR.isel(month=i, realiz=index_SPV_lower.values, drop=True),
axis=0).to_array().squeeze()
SS_SSPV_all = np.var(VAR.isel(month=i, realiz=index_SPV_lower.values, drop=True),
axis=0).to_array().squeeze()/np.sum(index_SPV_lower.values)
tit = 'Composites S4 ' + NAME + ' Conditioned - ENSO - ' + month[i]
filename = FIG_PATH + NAME + '_composites_SPoV_' + month[i] +'_ENSO.png'
plots.PlotVARCompositesPoVENSOSIG(NAME, var_WSPV_all - var_all, var_SSPV_all - var_all,
var_WSPV_ninio - var_all, var_SSPV_ninio - var_all,
var_WSPV_ninia - var_all, var_SSPV_ninia - var_all,
np.sqrt(SS_WSPV_all + SS_all), np.sqrt(SS_SSPV_all + SS_all),
np.sqrt(SS_WSPV_ninio + SS_all), np.sqrt(SS_SSPV_ninio + SS_all),
np.sqrt(SS_WSPV_ninia + SS_all), np.sqrt(SS_SSPV_ninia + SS_all),
nn_WSPV_all + nn_all - 2, nn_SSPV_all + nn_all - 2,
nn_WSPV_ninio + nn_all - 2, nn_SSPV_ninio + nn_all - 2,
nn_WSPV_ninia + nn_all - 2, nn_SSPV_ninia + nn_all - 2,
VAR.latitude, VAR.longitude, tit, filename)
filename = FIG_PATH + NAME + '_composites_SPoV_' + month[i] +'_ENSO_Aust.png'
regional_plots.PlotVARCompositesPoVENSOSIGAust(NAME, var_WSPV_all - var_all,
var_SSPV_all - var_all,
var_WSPV_ninio - var_all,
var_SSPV_ninio - var_all,
var_WSPV_ninia - var_all,
var_SSPV_ninia - var_all,
np.sqrt(SS_WSPV_all + SS_all),
np.sqrt(SS_SSPV_all + SS_all),
np.sqrt(SS_WSPV_ninio + SS_all),
np.sqrt(SS_SSPV_ninio + SS_all),
np.sqrt(SS_WSPV_ninia + SS_all),
np.sqrt(SS_SSPV_ninia + SS_all),
nn_WSPV_all + nn_all - 2,
nn_SSPV_all + nn_all - 2,
nn_WSPV_ninio + nn_all - 2,
nn_SSPV_ninio + nn_all - 2,
nn_WSPV_ninia + nn_all - 2,
nn_SSPV_ninia + nn_all - 2,
VAR.latitude, VAR.longitude, tit, filename)
filename = FIG_PATH + NAME + '_composites_SPoV_' + month[i] +'_ENSO_Afric.png'
regional_plots.PlotVARCompositesPoVENSOSIGAfric(NAME, var_WSPV_all - var_all,
var_SSPV_all - var_all,
var_WSPV_ninio - var_all,
var_SSPV_ninio - var_all,
var_WSPV_ninia - var_all,
var_SSPV_ninia - var_all,
np.sqrt(SS_WSPV_all + SS_all),
np.sqrt(SS_SSPV_all + SS_all),
np.sqrt(SS_WSPV_ninio + SS_all),
np.sqrt(SS_SSPV_ninio + SS_all),
np.sqrt(SS_WSPV_ninia + SS_all),
np.sqrt(SS_SSPV_ninia + SS_all),
nn_WSPV_all + nn_all - 2,
nn_SSPV_all + nn_all - 2,
nn_WSPV_ninio + nn_all - 2,
nn_SSPV_ninio + nn_all - 2,
nn_WSPV_ninia + nn_all - 2,
nn_SSPV_ninia + nn_all - 2,
VAR.latitude, VAR.longitude, tit, filename)
filename = FIG_PATH + NAME + '_composites_SPoV_' + month[i] +'_ENSO_Sudam.png'
regional_plots.PlotVARCompositesPoVENSOSIGSudam(NAME, var_WSPV_all - var_all,
var_SSPV_all - var_all,
var_WSPV_ninio - var_all,
var_SSPV_ninio - var_all,
var_WSPV_ninia - var_all,
var_SSPV_ninia - var_all,
np.sqrt(SS_WSPV_all + SS_all),
np.sqrt(SS_SSPV_all + SS_all),
np.sqrt(SS_WSPV_ninio + SS_all),
np.sqrt(SS_SSPV_ninio + SS_all),
np.sqrt(SS_WSPV_ninia + SS_all),
np.sqrt(SS_SSPV_ninia + SS_all),
nn_WSPV_all + nn_all - 2,
nn_SSPV_all + nn_all - 2,
nn_WSPV_ninio + nn_all - 2,
nn_SSPV_ninio + nn_all - 2,
nn_WSPV_ninia + nn_all - 2,
nn_SSPV_ninia + nn_all - 2,
VAR.latitude, VAR.longitude, tit, filename)
filename = FIG_PATH + NAME + '_composites_SPoV_' + month[i] +'_ENSO_Antarc.png'
regional_plots.PlotVARCompositesPoVENSOSIGAntarc(NAME, var_WSPV_all - var_all,
var_SSPV_all - var_all,
var_WSPV_ninio - var_all,
var_SSPV_ninio - var_all,
var_WSPV_ninia - var_all,
var_SSPV_ninia - var_all,
np.sqrt(SS_WSPV_all + SS_all),
np.sqrt(SS_SSPV_all + SS_all),
np.sqrt(SS_WSPV_ninio + SS_all),
np.sqrt(SS_SSPV_ninio + SS_all),
np.sqrt(SS_WSPV_ninia + SS_all),
np.sqrt(SS_SSPV_ninia + SS_all),
nn_WSPV_all + nn_all - 2,
nn_SSPV_all + nn_all - 2,
nn_WSPV_ninio + nn_all - 2,
nn_SSPV_ninio + nn_all - 2,
nn_WSPV_ninia + nn_all - 2,
nn_SSPV_ninia + nn_all - 2,
VAR.latitude, VAR.longitude, tit, filename)
for i in np.arange(0, 5):
VAR_s = VAR.isel(month=range(i, i+3)).mean(dim='month')
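    # 3-month running means: i=0 averages Aug-Sep-Oct ('ASO'), ..., i=4
    # averages Dec-Jan-Feb ('DJF'), matching the seas labels above.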
var_WSPV_ninio = np.mean(VAR_s.isel( realiz=index_WSPV_ninio, drop=True),
axis=0).to_array().squeeze()
SS_WSPV_ninio = np.var(VAR_s.isel( realiz=index_WSPV_ninio, drop=True),
axis=0).to_array().squeeze()/nn_WSPV_ninio
var_SSPV_ninio = np.mean(VAR_s.isel( realiz=index_SSPV_ninio, drop=True),
axis=0).to_array().squeeze()
SS_SSPV_ninio = np.var(VAR_s.isel( realiz=index_SSPV_ninio, drop=True),
axis=0).to_array().squeeze()/nn_SSPV_ninio
var_WSPV_ninia = np.mean(VAR_s.isel( realiz=index_WSPV_ninia, drop=True),
axis=0).to_array().squeeze()
SS_WSPV_ninia = np.var(VAR_s.isel( realiz=index_WSPV_ninia, drop=True),
axis=0).to_array().squeeze()/nn_WSPV_ninia
var_SSPV_ninia = np.mean(VAR_s.isel( realiz=index_SSPV_ninia, drop=True),
axis=0).to_array().squeeze()
SS_SSPV_ninia = np.var(VAR_s.isel( realiz=index_SSPV_ninia, drop=True),
axis=0).to_array().squeeze()/nn_SSPV_ninia
var_WSPV_all = np.mean(VAR_s.isel( realiz=index_SPV_upper.values, drop=True),
axis=0).to_array().squeeze()
SS_WSPV_all = np.var(VAR_s.isel( realiz=index_SPV_upper.values),
axis=0).to_array().squeeze()/np.sum(index_SPV_upper.values)
var_all = np.mean(VAR_s, axis=0).to_array().squeeze()
SS_all = np.var(VAR_s, axis=0).to_array().squeeze()/nn_all
var_SSPV_all = np.mean(VAR_s.isel( realiz=index_SPV_lower.values, drop=True),
axis=0).to_array().squeeze()
SS_SSPV_all = np.var(VAR_s.isel( realiz=index_SPV_lower.values, drop=True),
axis=0).to_array().squeeze()/np.sum(index_SPV_lower.values)
tit = 'Composites S4 ' + NAME + ' Conditioned - ENSO - ' + seas[i]
filename = FIG_PATH + NAME + '_composites_SPoV_' + seas[i] +'_ENSO.png'
plots.PlotVARCompositesPoVENSOSIG(NAME, var_WSPV_all - var_all, var_SSPV_all - var_all,
var_WSPV_ninio - var_all, var_SSPV_ninio - var_all,
var_WSPV_ninia - var_all, var_SSPV_ninia - var_all,
np.sqrt(SS_WSPV_all + SS_all), np.sqrt(SS_SSPV_all + SS_all),
np.sqrt(SS_WSPV_ninio + SS_all), np.sqrt(SS_SSPV_ninio + SS_all),
np.sqrt(SS_WSPV_ninia + SS_all), np.sqrt(SS_SSPV_ninia + SS_all),
nn_WSPV_all + nn_all - 2, nn_SSPV_all + nn_all - 2,
nn_WSPV_ninio + nn_all - 2, nn_SSPV_ninio + nn_all - 2,
nn_WSPV_ninia + nn_all - 2, nn_SSPV_ninia + nn_all - 2,
VAR.latitude, VAR.longitude, tit, filename)
filename = FIG_PATH + NAME + '_composites_SPoV_' + seas[i] +'_ENSO_Aust.png'
regional_plots.PlotVARCompositesPoVENSOSIGAust(NAME, var_WSPV_all - var_all,
var_SSPV_all - var_all,
var_WSPV_ninio - var_all,
var_SSPV_ninio - var_all,
var_WSPV_ninia - var_all,
var_SSPV_ninia - var_all,
np.sqrt(SS_WSPV_all + SS_all),
np.sqrt(SS_SSPV_all + SS_all),
np.sqrt(SS_WSPV_ninio + SS_all),
np.sqrt(SS_SSPV_ninio + SS_all),
np.sqrt(SS_WSPV_ninia + SS_all),
np.sqrt(SS_SSPV_ninia + SS_all),
nn_WSPV_all + nn_all - 2,
nn_SSPV_all + nn_all - 2,
nn_WSPV_ninio + nn_all - 2,
nn_SSPV_ninio + nn_all - 2,
nn_WSPV_ninia + nn_all - 2,
nn_SSPV_ninia + nn_all - 2,
VAR.latitude, VAR.longitude, tit, filename)
filename = FIG_PATH + NAME + '_composites_SPoV_' + seas[i] +'_ENSO_Afric.png'
regional_plots.PlotVARCompositesPoVENSOSIGAfric(NAME, var_WSPV_all - var_all,
var_SSPV_all - var_all,
var_WSPV_ninio - var_all,
var_SSPV_ninio - var_all,
var_WSPV_ninia - var_all,
var_SSPV_ninia - var_all,
np.sqrt(SS_WSPV_all + SS_all),
np.sqrt(SS_SSPV_all + SS_all),
np.sqrt(SS_WSPV_ninio + SS_all),
np.sqrt(SS_SSPV_ninio + SS_all),
np.sqrt(SS_WSPV_ninia + SS_all),
np.sqrt(SS_SSPV_ninia + SS_all),
nn_WSPV_all + nn_all - 2,
nn_SSPV_all + nn_all - 2,
nn_WSPV_ninio + nn_all - 2,
nn_SSPV_ninio + nn_all - 2,
nn_WSPV_ninia + nn_all - 2,
nn_SSPV_ninia + nn_all - 2,
VAR.latitude, VAR.longitude, tit, filename)
filename = FIG_PATH + NAME + '_composites_SPoV_' + seas[i] +'_ENSO_Sudam.png'
regional_plots.PlotVARCompositesPoVENSOSIGSudam(NAME, var_WSPV_all - var_all,
var_SSPV_all - var_all,
var_WSPV_ninio - var_all,
var_SSPV_ninio - var_all,
var_WSPV_ninia - var_all,
var_SSPV_ninia - var_all,
np.sqrt(SS_WSPV_all + SS_all),
np.sqrt(SS_SSPV_all + SS_all),
np.sqrt(SS_WSPV_ninio + SS_all),
np.sqrt(SS_SSPV_ninio + SS_all),
np.sqrt(SS_WSPV_ninia + SS_all),
np.sqrt(SS_SSPV_ninia + SS_all),
nn_WSPV_all + nn_all - 2,
nn_SSPV_all + nn_all - 2,
nn_WSPV_ninio + nn_all - 2,
nn_SSPV_ninio + nn_all - 2,
nn_WSPV_ninia + nn_all - 2,
nn_SSPV_ninia + nn_all - 2,
VAR.latitude, VAR.longitude, tit, filename)
filename = FIG_PATH + NAME + '_composites_SPoV_' + seas[i] +'_ENSO_Antarc.png'
regional_plots.PlotVARCompositesPoVENSOSIGAntarc(NAME, var_WSPV_all - var_all,
var_SSPV_all - var_all,
var_WSPV_ninio - var_all,
var_SSPV_ninio - var_all,
var_WSPV_ninia - var_all,
var_SSPV_ninia - var_all,
np.sqrt(SS_WSPV_all + SS_all),
np.sqrt(SS_SSPV_all + SS_all),
np.sqrt(SS_WSPV_ninio + SS_all),
np.sqrt(SS_SSPV_ninio + SS_all),
np.sqrt(SS_WSPV_ninia + SS_all),
np.sqrt(SS_SSPV_ninia + SS_all),
nn_WSPV_all + nn_all - 2,
nn_SSPV_all + nn_all - 2,
nn_WSPV_ninio + nn_all - 2,
nn_SSPV_ninio + nn_all - 2,
nn_WSPV_ninia + nn_all - 2,
nn_SSPV_ninia + nn_all - 2,
VAR.latitude, VAR.longitude, tit, filename)
| [
"[email protected]"
] | |
e944ac632c5986200ef656717afb0a52d305c33e | 5ec48e90f711c9514a6d2ee36dbb46bc1ba71b74 | /shop/urls.py | c552e41a6565ef31e6acd61ea30c24f84cf3f152 | [] | no_license | hanieh-mav/hanieh_shop | 1ca5042fefb970459d9f48fb716a95fec6a530bb | b7cf253e11b6c167e78b245f253a8d057f435026 | refs/heads/main | 2023-06-10T16:37:26.385048 | 2021-07-07T14:19:58 | 2021-07-07T14:19:58 | 372,892,835 | 2 | 0 | null | 2021-07-07T14:19:59 | 2021-06-01T16:19:48 | CSS | UTF-8 | Python | false | false | 443 | py | from django.urls import path
from .views import home , category_detail , ProductDetail
app_name = 'shop'
urlpatterns = [
path('',home,name='home'),
path('page/<int:page>',home,name='home'),
path('category/<slug:slug>',category_detail,name='category_detail'),
path('category/<slug:slug>/<int:page>',category_detail,name='category_detail'),
path('detail/<int:pk>',ProductDetail.as_view(),name='product_detaill'),
] | [
"[email protected]"
] | |
3e733750ad74b97a747c6020dc169f595fa9de9a | 38422c3edeb269926502fed31a0761aff8dd3d3b | /Swanepoel_analysis/Swanepoel_analysis/Old_control_files/Swanepoel_GUI_v3.py | 48959058c2d200add495bd6e2d6cbe2102f979f5 | [] | no_license | vfurtula/Alle-projekter | 2dab3ccbf7ddb6be3ee09f9f5e87085f354dd84a | da3d7c9611088043e2aea5d844f1ae6056215e04 | refs/heads/master | 2022-06-07T05:17:35.327228 | 2020-04-30T10:28:48 | 2020-04-30T10:28:48 | 260,180,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,741 | py | ## Import libraries
import matplotlib.pyplot as plt
import os, sys, time, imp, numpy
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import QThread, SIGNAL
import config_Swanepoel
class my_Thread(QThread):
def __init__(self, *argv):
QThread.__init__(self)
self.sender=argv[0]
def __del__(self):
self.wait()
def run(self):
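        # Each analysis step lives in its own module; importing lazily keeps GUI
        # start-up fast and runs the heavy plotting work in this worker thread.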
try:
if self.sender=='Raw data':
import get_raw
my_arg = get_raw.Get_raw()
elif self.sender=='Tmin and Tmax':
import get_Tmax_Tmin
my_arg = get_Tmax_Tmin.Get_Tmax_Tmin()
elif self.sender=='Std.Dev. in d':
import get_vary_igp
my_arg = get_vary_igp.Vary_igp()
elif self.sender=='Index n':
import get_m_d
my_arg = get_m_d.Gmd()
elif self.sender=='Absorption alpha':
import alpha
my_arg = alpha.Alpha()
elif self.sender=='Wavenumber k':
import k
my_arg = k.K_class()
self.emit(SIGNAL('pass_plots(PyQt_PyObject,PyQt_PyObject)'), my_arg, self.sender)
except Exception as inst:
if "common_xaxis" in inst.args:
self.emit(SIGNAL('excpt_common_xaxis()') )
elif "interpol" in inst.args:
self.emit(SIGNAL('excpt_interpol()') )
elif "squareroot" in inst.args:
self.emit(SIGNAL('excpt_squareroot()') )
class Run_CM110(QtGui.QWidget):
def __init__(self):
super(Run_CM110, self).__init__()
self.initUI()
def initUI(self):
################### MENU BARS START ##################
MyBar = QtGui.QMenuBar(self)
fileMenu = MyBar.addMenu("File")
fileSave = fileMenu.addAction("Save config file")
fileSave.triggered.connect(self.set_save_config)
fileSave.setShortcut('Ctrl+S')
fileSaveAs = fileMenu.addAction("Save config file as")
fileSaveAs.triggered.connect(self.saveConfigAs)
fileLoad = fileMenu.addAction("Load config from file")
fileLoad.triggered.connect(self.loadConfig)
fileLoad.setShortcut('Ctrl+O')
fileClose = fileMenu.addAction("Close")
fileClose.triggered.connect(self.close) # triggers closeEvent()
fileClose.setShortcut('Ctrl+X')
loadMenu = MyBar.addMenu("Load data")
loadSubOlis = loadMenu.addAction("OLIS sub")
loadSubFilmOlis = loadMenu.addAction("OLIS sub + thin film")
loadSubFTIR = loadMenu.addAction("FTIR sub")
loadSubFilmFTIR = loadMenu.addAction("FTIR sub + thin film")
loadSubOlis.triggered.connect(self.loadSubOlisDialog)
loadSubFilmOlis.triggered.connect(self.loadSubFilmOlisDialog)
loadSubFTIR.triggered.connect(self.loadSubFTIRDialog)
loadSubFilmFTIR.triggered.connect(self.loadSubFilmFTIRDialog)
removeMenu = MyBar.addMenu("Remove data")
removeSubOlis = removeMenu.addAction("OLIS sub")
removeSubFilmOlis = removeMenu.addAction("OLIS sub + thin film")
removeSubFTIR = removeMenu.addAction("FTIR sub")
removeSubFilmFTIR = removeMenu.addAction("FTIR sub + thin film")
removeSubOlis.triggered.connect(self.removeSubOlisDialog)
removeSubFilmOlis.triggered.connect(self.removeSubFilmOlisDialog)
removeSubFTIR.triggered.connect(self.removeSubFTIRDialog)
removeSubFilmFTIR.triggered.connect(self.removeSubFilmFTIRDialog)
helpMenu = MyBar.addMenu("Help")
helpParam = helpMenu.addAction("Instructions")
helpParam.triggered.connect(self.helpParamDialog)
contact = helpMenu.addAction("Contact")
contact.triggered.connect(self.contactDialog)
################### MENU BARS END ##################
# status info which button has been pressed
Start_lbl = QtGui.QLabel("ANALYSIS steps and plots", self)
Start_lbl.setStyleSheet("color: blue")
Step0_lbl = QtGui.QLabel("STEP 0. Plot raw data for OLIS and FTIR.", self)
Step0_lbl.setStyleSheet("color: black")
Step0_lbl.setFixedWidth(200)
Step0_lbl.setWordWrap(True)
self.Step0_Button = QtGui.QPushButton("Raw data",self)
self.button_style(self.Step0_Button,'black')
Step1_lbl = QtGui.QLabel("STEP 1. Find all the minima and maxima positions using Gaussian filter.", self)
Step1_lbl.setStyleSheet("color: black")
Step1_lbl.setFixedWidth(200)
Step1_lbl.setWordWrap(True)
self.Step1_Button = QtGui.QPushButton("Tmin and Tmax",self)
self.button_style(self.Step1_Button,'black')
Step2_lbl = QtGui.QLabel("STEP 2. Minimize standard deviation in the film thickness d.", self)
Step2_lbl.setStyleSheet("color: black")
Step2_lbl.setFixedWidth(200)
Step2_lbl.setWordWrap(True)
self.Step2_Button = QtGui.QPushButton("Std.Dev. in d",self)
self.button_style(self.Step2_Button,'black')
Step3_lbl = QtGui.QLabel("STEP 3. Plot refractive indicies n1 and n2.", self)
Step3_lbl.setStyleSheet("color: black")
Step3_lbl.setFixedWidth(200)
Step3_lbl.setWordWrap(True)
self.Step3_Button = QtGui.QPushButton("Index n",self)
self.button_style(self.Step3_Button,'black')
Step4_lbl = QtGui.QLabel("STEP 4. Plot abosorption alpha based on n2.", self)
Step4_lbl.setStyleSheet("color: black")
Step4_lbl.setFixedWidth(200)
Step4_lbl.setWordWrap(True)
self.Step4_Button = QtGui.QPushButton("Absorption alpha",self)
self.button_style(self.Step4_Button,'black')
Step5_lbl = QtGui.QLabel("STEP 5. Plot wavenumber k based on n2.", self)
Step5_lbl.setStyleSheet("color: black")
Step5_lbl.setFixedWidth(200)
Step5_lbl.setWordWrap(True)
self.Step5_Button = QtGui.QPushButton("Wavenumber k",self)
self.button_style(self.Step5_Button,'black')
####################################################
# status info which button has been pressed
        NewFiles_lbl = QtGui.QLabel("NEWLY created and saved files with a timestamp", self)
NewFiles_lbl.setStyleSheet("color: blue")
self.NewFiles = numpy.zeros(5,dtype=object)
for i in range(4):
self.NewFiles[i] = QtGui.QLabel(''.join([str(i+1),': ']), self)
self.NewFiles[i].setStyleSheet("color: magenta")
####################################################
loads_lbl = QtGui.QLabel("RAW data files", self)
loads_lbl.setStyleSheet("color: blue")
configFile_lbl = QtGui.QLabel("Current config file", self)
self.config_file_lbl = QtGui.QLabel("", self)
self.config_file_lbl.setStyleSheet("color: green")
loadSubOlis_lbl = QtGui.QLabel("OLIS sub", self)
self.loadSubOlisFile_lbl = QtGui.QLabel("", self)
self.loadSubOlisFile_lbl.setStyleSheet("color: magenta")
loadSubFilmOlis_lbl = QtGui.QLabel("OLIS sub + thin film", self)
self.loadSubFilmOlisFile_lbl = QtGui.QLabel("", self)
self.loadSubFilmOlisFile_lbl.setStyleSheet("color: magenta")
loadSubFTIR_lbl = QtGui.QLabel("FTIR sub", self)
self.loadSubFTIRFile_lbl = QtGui.QLabel("", self)
self.loadSubFTIRFile_lbl.setStyleSheet("color: magenta")
loadSubFilmFTIR_lbl = QtGui.QLabel("FTIR sub + thin film", self)
self.loadSubFilmFTIRFile_lbl = QtGui.QLabel("", self)
self.loadSubFilmFTIRFile_lbl.setStyleSheet("color: magenta")
self.cb_sub_olis = QtGui.QCheckBox('',self)
self.cb_sub_olis.toggle()
self.cb_subfilm_olis = QtGui.QCheckBox('',self)
self.cb_subfilm_olis.toggle()
self.cb_sub_ftir = QtGui.QCheckBox('',self)
self.cb_sub_ftir.toggle()
self.cb_subfilm_ftir = QtGui.QCheckBox('',self)
self.cb_subfilm_ftir.toggle()
plot_X_lbl = QtGui.QLabel("Plot X axis in", self)
self.combo2 = QtGui.QComboBox(self)
self.mylist2=["eV","nm"]
self.combo2.addItems(self.mylist2)
self.combo2.setFixedWidth(70)
####################################################
lbl1 = QtGui.QLabel("GAUSSIAN filter settings", self)
lbl1.setStyleSheet("color: blue")
interpol_lbl = QtGui.QLabel("Interpolation method", self)
self.combo4 = QtGui.QComboBox(self)
self.mylist4=["spline","linear"]
self.combo4.addItems(self.mylist4)
self.combo4.setFixedWidth(70)
factors_lbl = QtGui.QLabel("Gaussian factors", self)
self.factorsEdit = QtGui.QLineEdit("",self)
self.factorsEdit.setFixedWidth(200)
borders_lbl = QtGui.QLabel("Gaussian borders [eV]", self)
self.bordersEdit = QtGui.QLineEdit("",self)
self.bordersEdit.setFixedWidth(200)
##############################################
lbl2 = QtGui.QLabel("ABSORPTION alpha and n1 and n2", self)
lbl2.setStyleSheet("color: blue")
poly_lbl = QtGui.QLabel("Polyfit order", self)
self.combo1 = QtGui.QComboBox(self)
self.mylist1=["1","2","3","4","5"]
self.combo1.addItems(self.mylist1)
self.combo1.setFixedWidth(70)
polybord_lbl = QtGui.QLabel("Polyfit range(s) [eV]", self)
self.poly_bordersEdit = QtGui.QLineEdit("",self)
self.poly_bordersEdit.setFixedWidth(140)
self.cb_polybord = QtGui.QCheckBox('',self)
self.cb_polybord.toggle()
ignore_data_lbl = QtGui.QLabel("No. of ignored points", self)
self.ignore_data_ptsEdit = QtGui.QLineEdit("",self)
self.ignore_data_ptsEdit.setFixedWidth(140)
corr_slit_lbl = QtGui.QLabel("Correction slit width [nm]", self)
self.corr_slitEdit = QtGui.QLineEdit("",self)
self.corr_slitEdit.setFixedWidth(140)
##############################################
lbl4 = QtGui.QLabel("STORAGE location (file, folder)", self)
lbl4.setStyleSheet("color: blue")
self.filenameEdit = QtGui.QLineEdit("",self)
self.folderEdit = QtGui.QLineEdit("",self)
self.filenameEdit.setFixedWidth(180)
self.folderEdit.setFixedWidth(180)
self.cb_save_figs = QtGui.QCheckBox('Save figs',self)
self.cb_save_figs.toggle()
##############################################
self.lcd = QtGui.QLCDNumber(self)
self.lcd.setStyleSheet("color: red")
self.lcd.setFixedHeight(60)
self.lcd.setSegmentStyle(QtGui.QLCDNumber.Flat)
self.lcd.setNumDigits(11)
##############################################
# Add all widgets
g1_0 = QtGui.QGridLayout()
g1_0.addWidget(MyBar,0,0)
g1_1 = QtGui.QGridLayout()
g1_1.addWidget(loads_lbl,0,0)
g1_1.addWidget(configFile_lbl,1,0)
g1_1.addWidget(self.config_file_lbl,1,1)
g1_1.addWidget(loadSubOlis_lbl,2,0)
g1_1.addWidget(self.loadSubOlisFile_lbl,2,1)
g1_1.addWidget(self.cb_sub_olis,2,2)
g1_1.addWidget(loadSubFilmOlis_lbl,3,0)
g1_1.addWidget(self.loadSubFilmOlisFile_lbl,3,1)
g1_1.addWidget(self.cb_subfilm_olis,3,2)
g1_1.addWidget(loadSubFTIR_lbl,4,0)
g1_1.addWidget(self.loadSubFTIRFile_lbl,4,1)
g1_1.addWidget(self.cb_sub_ftir,4,2)
g1_1.addWidget(loadSubFilmFTIR_lbl,5,0)
g1_1.addWidget(self.loadSubFilmFTIRFile_lbl,5,1)
g1_1.addWidget(self.cb_subfilm_ftir,5,2)
g1_1.addWidget(plot_X_lbl,6,0)
g1_1.addWidget(self.combo2,6,1)
g1_2 = QtGui.QGridLayout()
g1_2.addWidget(lbl1,0,0)
g1_3 = QtGui.QGridLayout()
g1_3.addWidget(interpol_lbl,0,0)
g1_3.addWidget(self.combo4,0,1)
g1_3.addWidget(factors_lbl,1,0)
g1_3.addWidget(self.factorsEdit,1,1)
g1_3.addWidget(borders_lbl,2,0)
g1_3.addWidget(self.bordersEdit,2,1)
g1_4 = QtGui.QGridLayout()
g1_4.addWidget(lbl2,0,0)
g1_5 = QtGui.QGridLayout()
g1_5.addWidget(poly_lbl,0,0)
g1_5.addWidget(self.combo1,0,1)
g1_5.addWidget(polybord_lbl,1,0)
g1_5.addWidget(self.poly_bordersEdit,1,1)
g1_5.addWidget(self.cb_polybord,1,2)
g1_5.addWidget(ignore_data_lbl,2,0)
g1_5.addWidget(self.ignore_data_ptsEdit,2,1)
g1_5.addWidget(corr_slit_lbl,3,0)
g1_5.addWidget(self.corr_slitEdit,3,1)
g4_0 = QtGui.QGridLayout()
g4_0.addWidget(lbl4,0,0)
g4_0.addWidget(self.cb_save_figs,0,1)
g4_1 = QtGui.QGridLayout()
g4_1.addWidget(self.filenameEdit,0,0)
g4_1.addWidget(self.folderEdit,0,1)
v1 = QtGui.QVBoxLayout()
v1.addLayout(g1_0)
v1.addLayout(g1_1)
v1.addLayout(g1_2)
v1.addLayout(g1_3)
v1.addLayout(g1_4)
v1.addLayout(g1_5)
v1.addLayout(g4_0)
v1.addLayout(g4_1)
###################################################
g1_6 = QtGui.QGridLayout()
g1_6.addWidget(Start_lbl,0,0)
g1_7 = QtGui.QGridLayout()
g1_7.addWidget(Step0_lbl,0,0)
g1_7.addWidget(self.Step0_Button,0,1)
g1_7.addWidget(Step1_lbl,1,0)
g1_7.addWidget(self.Step1_Button,1,1)
g1_7.addWidget(Step2_lbl,2,0)
g1_7.addWidget(self.Step2_Button,2,1)
g1_7.addWidget(Step3_lbl,3,0)
g1_7.addWidget(self.Step3_Button,3,1)
g1_7.addWidget(Step4_lbl,4,0)
g1_7.addWidget(self.Step4_Button,4,1)
g1_7.addWidget(Step5_lbl,5,0)
g1_7.addWidget(self.Step5_Button,5,1)
g1_8 = QtGui.QGridLayout()
g1_8.addWidget(NewFiles_lbl,0,0)
for i in range(4):
g1_8.addWidget(self.NewFiles[i],1+i,0)
g1_8.addWidget(self.lcd,2+i,0)
v0 = QtGui.QVBoxLayout()
v0.addLayout(g1_6)
v0.addLayout(g1_7)
v0.addLayout(g1_8)
# SET ALL VERTICAL COLUMNS TOGETHER
hbox = QtGui.QHBoxLayout()
hbox.addLayout(v1)
hbox.addLayout(v0)
self.setLayout(hbox)
###############################################################################
# reacts to choises picked in the menu
self.combo1.activated[str].connect(self.onActivated1)
self.combo2.activated[str].connect(self.onActivated2)
self.combo4.activated[str].connect(self.onActivated4)
# reacts to choises picked in the menu
self.Step0_Button.clicked.connect(self.set_run)
self.Step1_Button.clicked.connect(self.set_run)
self.Step2_Button.clicked.connect(self.set_run)
self.Step3_Button.clicked.connect(self.set_run)
self.Step4_Button.clicked.connect(self.set_run)
self.Step5_Button.clicked.connect(self.set_run)
# reacts to choises picked in the checkbox
self.cb_sub_olis.stateChanged.connect(self.sub_olis_check)
self.cb_subfilm_olis.stateChanged.connect(self.subfilm_olis_check)
self.cb_sub_ftir.stateChanged.connect(self.sub_ftir_check)
self.cb_subfilm_ftir.stateChanged.connect(self.subfilm_ftir_check)
self.cb_save_figs.stateChanged.connect(self.save_figs_check)
self.cb_polybord.stateChanged.connect(self.polybord_check)
self.move(0,0)
#self.setGeometry(50, 50, 800, 500)
hbox.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.setWindowTitle("Swanepoel method for determination of thickness and optical constants for thin films")
self.show()
try:
# Initial read of the config file
self.config_file = config_Swanepoel.current_config_file
head, tail = os.path.split(self.config_file)
sys.path.insert(0, head)
self.cf = __import__(tail[:-3])
# load all relevant parameters
self.loadSubOlis_str = self.cf.loadSubOlis[0]
self.loadSubFilmOlis_str = self.cf.loadSubFilmOlis[0]
self.loadSubFTIR_str = self.cf.loadSubFTIR[0]
self.loadSubFilmFTIR_str = self.cf.loadSubFilmFTIR[0]
self.loadSubOlis_check = self.cf.loadSubOlis[1]
self.loadSubFilmOlis_check = self.cf.loadSubFilmOlis[1]
self.loadSubFTIR_check = self.cf.loadSubFTIR[1]
self.loadSubFilmFTIR_check = self.cf.loadSubFilmFTIR[1]
self.fit_linear_spline=self.cf.fit_linear_spline
self.gaussian_factors=self.cf.gaussian_factors
self.gaussian_borders=self.cf.gaussian_borders
self.fit_poly_order=self.cf.fit_poly_order
self.ignore_data_pts=self.cf.ignore_data_pts
self.corr_slit=self.cf.corr_slit
self.fit_poly_ranges=self.cf.fit_poly_ranges[0]
self.fit_poly_ranges_check=self.cf.fit_poly_ranges[1]
self.filename_str=self.cf.filename
self.folder_str=self.cf.folder
self.timestr=self.cf.timestr
self.save_figs=self.cf.save_figs
self.plot_X=self.cf.plot_X
self.set_field_vals()
except Exception,e:
QtGui.QMessageBox.critical(self, 'Message', "Could not load from the selected config file!")
def set_field_vals(self):
head, tail = os.path.split(self.config_file)
self.config_file_lbl.setText(tail)
head, tail = os.path.split(self.loadSubOlis_str)
self.loadSubOlisFile_lbl.setText(tail)
head, tail = os.path.split(self.loadSubFilmOlis_str)
self.loadSubFilmOlisFile_lbl.setText(tail)
head, tail = os.path.split(self.loadSubFTIR_str)
self.loadSubFTIRFile_lbl.setText(tail)
head, tail = os.path.split(self.loadSubFilmFTIR_str)
self.loadSubFilmFTIRFile_lbl.setText(tail)
##############################################
self.sub_olis_check(self.loadSubOlis_check)
self.cb_sub_olis.setChecked(self.loadSubOlis_check)
if self.loadSubOlis_str=='':
self.cb_sub_olis.setEnabled(False)
self.subfilm_olis_check(self.loadSubFilmOlis_check)
self.cb_subfilm_olis.setChecked(self.loadSubFilmOlis_check)
if self.loadSubFilmOlis_str=='':
self.cb_subfilm_olis.setEnabled(False)
self.sub_ftir_check(self.loadSubFTIR_check)
self.cb_sub_ftir.setChecked(self.loadSubFTIR_check)
if self.loadSubFTIR_str=='':
self.cb_sub_ftir.setEnabled(False)
self.subfilm_ftir_check(self.loadSubFilmFTIR_check)
self.cb_subfilm_ftir.setChecked(self.loadSubFilmFTIR_check)
if self.loadSubFilmFTIR_str=='':
self.cb_subfilm_ftir.setEnabled(False)
self.save_figs_check(self.save_figs)
self.cb_save_figs.setChecked(self.save_figs)
##############################################
if len(self.fit_poly_ranges)==0:
self.fit_poly_ranges_check=False
self.polybord_check(self.fit_poly_ranges_check)
self.cb_polybord.setChecked(self.fit_poly_ranges_check)
else:
self.polybord_check(self.fit_poly_ranges_check)
self.cb_polybord.setChecked(self.fit_poly_ranges_check)
##############################################
self.factorsEdit.setText(', '.join([str(i) for i in self.gaussian_factors] ))
self.bordersEdit.setText(', '.join([str(i) for i in self.gaussian_borders] ))
##############################################
self.combo1.setCurrentIndex(self.mylist1.index(str(self.fit_poly_order)))
self.combo2.setCurrentIndex(self.mylist2.index(str(self.plot_X)))
self.combo4.setCurrentIndex(self.mylist4.index(self.fit_linear_spline))
##############################################
self.poly_bordersEdit.setText(', '.join([str(i) for i in self.fit_poly_ranges] ))
self.ignore_data_ptsEdit.setText(str(self.ignore_data_pts))
self.corr_slitEdit.setText(str(self.corr_slit))
##############################################
self.filenameEdit.setText(str(self.filename_str))
self.folderEdit.setText(str(self.folder_str))
self.lcd.display(self.timestr)
def button_style(self,button,color):
button.setStyleSheet(''.join(['QPushButton {background-color: lightblue; font-size: 18pt; color: ',color,'}']))
button.setFixedWidth(230)
button.setFixedHeight(60)
def loadConfig(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Load Config File Dialog',self.config_file)
if fname:
self.config_file = str(fname)
head, tail = os.path.split(str(fname))
sys.path.insert(0, head)
self.set_load_config(tail)
def saveConfigAs(self):
fname = QtGui.QFileDialog.getSaveFileName(self, 'Save Config File Dialog',self.config_file)
if fname:
self.set_save_config_as(str(fname))
def loadSubOlisDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file',self.loadSubOlis_str)
if fname:
self.loadSubOlis_str = str(fname)
head, tail = os.path.split(str(fname))
self.loadSubOlisFile_lbl.setText(tail)
self.cb_sub_olis.setEnabled(True)
def loadSubFilmOlisDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file',self.loadSubFilmOlis_str)
if fname:
self.loadSubFilmOlis_str = str(fname)
head, tail = os.path.split(str(fname))
self.loadSubFilmOlisFile_lbl.setText(tail)
self.cb_subfilm_olis.setEnabled(True)
def loadSubFTIRDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file',self.loadSubFTIR_str)
if fname:
self.loadSubFTIR_str = str(fname)
head, tail = os.path.split(str(fname))
self.loadSubFTIRFile_lbl.setText(tail)
self.cb_sub_ftir.setEnabled(True)
def loadSubFilmFTIRDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file',self.loadSubFilmFTIR_str)
if fname:
self.loadSubFilmFTIR_str = str(fname)
head, tail = os.path.split(str(fname))
self.loadSubFilmFTIRFile_lbl.setText(tail)
self.cb_subfilm_ftir.setEnabled(True)
def removeSubOlisDialog(self):
self.loadSubOlis_str = ''
self.loadSubOlisFile_lbl.setText(self.loadSubOlis_str)
self.loadSubOlis_check=False
self.sub_olis_check(self.loadSubOlis_check)
self.cb_sub_olis.setChecked(self.loadSubOlis_check)
self.cb_sub_olis.setEnabled(False)
def removeSubFilmOlisDialog(self):
self.loadSubFilmOlis_str = ''
self.loadSubFilmOlisFile_lbl.setText(self.loadSubFilmOlis_str)
self.loadSubFilmOlis_check = False
self.subfilm_olis_check(self.loadSubFilmOlis_check)
self.cb_subfilm_olis.setChecked(self.loadSubFilmOlis_check)
self.cb_subfilm_olis.setEnabled(False)
def removeSubFTIRDialog(self):
self.loadSubFTIR_str = ''
self.loadSubFTIRFile_lbl.setText(self.loadSubFTIR_str)
self.loadSubFTIR_check = False
self.sub_ftir_check(self.loadSubFTIR_check)
self.cb_sub_ftir.setChecked(self.loadSubFTIR_check)
self.cb_sub_ftir.setEnabled(False)
def removeSubFilmFTIRDialog(self):
self.loadSubFilmFTIR_str = ''
self.loadSubFilmFTIRFile_lbl.setText(self.loadSubFilmFTIR_str)
self.loadSubFilmFTIR_check = False
self.subfilm_ftir_check(self.loadSubFilmFTIR_check)
self.cb_subfilm_ftir.setChecked(self.loadSubFilmFTIR_check)
self.cb_subfilm_ftir.setEnabled(False)
def helpParamDialog(self):
helpfile=''
with open('config_Swanepoel_forklaringer.py','r') as f:
for line in f:
helpfile = helpfile+line
msg = QtGui.QMessageBox()
msg.setIcon(QtGui.QMessageBox.Information)
msg.setText("Apply Swanepoel analysis using following steps:")
msg.setInformativeText(helpfile)
msg.setWindowTitle("Help")
#msg.setDetailedText(helpfile)
msg.setStandardButtons(QtGui.QMessageBox.Ok)
#msg.setGeometry(1000, 0, 1000+250, 350)
msg.exec_()
def contactDialog(self):
QtGui.QMessageBox.information(self, "Contact information","Suggestions, comments or bugs can be reported to [email protected]")
def onActivated1(self, text):
self.fit_poly_order = int(text)
def onActivated2(self, text):
self.plot_X = str(text)
def onActivated4(self, text):
self.fit_linear_spline=str(text)
def save_figs_check(self, state):
if state in [QtCore.Qt.Checked,True]:
self.save_figs=True
else:
self.save_figs=False
def sub_olis_check(self, state):
if state in [QtCore.Qt.Checked,True]:
self.loadSubOlis_check=True
self.loadSubOlisFile_lbl.setStyleSheet("color: magenta")
self.cb_sub_olis.setText('incl')
else:
self.loadSubOlis_check=False
self.loadSubOlisFile_lbl.setStyleSheet("color: grey")
self.cb_sub_olis.setText('exc')
def subfilm_olis_check(self, state):
if state in [QtCore.Qt.Checked,True]:
self.loadSubFilmOlis_check=True
self.loadSubFilmOlisFile_lbl.setStyleSheet("color: magenta")
self.cb_subfilm_olis.setText('incl')
else:
self.loadSubFilmOlis_check=False
self.loadSubFilmOlisFile_lbl.setStyleSheet("color: grey")
self.cb_subfilm_olis.setText('exc')
def sub_ftir_check(self, state):
if state in [QtCore.Qt.Checked,True]:
self.loadSubFTIR_check=True
self.loadSubFTIRFile_lbl.setStyleSheet("color: magenta")
self.cb_sub_ftir.setText('incl')
else:
self.loadSubFTIR_check=False
self.loadSubFTIRFile_lbl.setStyleSheet("color: grey")
self.cb_sub_ftir.setText('exc')
def subfilm_ftir_check(self, state):
if state in [QtCore.Qt.Checked,True]:
self.loadSubFilmFTIR_check=True
self.loadSubFilmFTIRFile_lbl.setStyleSheet("color: magenta")
self.cb_subfilm_ftir.setText('incl')
else:
self.loadSubFilmFTIR_check=False
self.loadSubFilmFTIRFile_lbl.setStyleSheet("color: grey")
self.cb_subfilm_ftir.setText('exc')
def polybord_check(self, state):
if state in [QtCore.Qt.Checked,True]:
self.fit_poly_ranges_check=True
self.poly_bordersEdit.setEnabled(True)
self.cb_polybord.setText('incl')
else:
self.fit_poly_ranges_check=False
self.poly_bordersEdit.setEnabled(False)
self.cb_polybord.setText('exc')
############################################################
# Check input if a number, ie. digits or fractions such as 3.141
# Source: http://www.pythoncentral.io/how-to-check-if-a-string-is-a-number-in-python-including-unicode/
def is_int(self,s):
try:
int(s)
return True
except ValueError:
return False
def is_number(self,s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def set_run(self):
sender = self.sender()
## gaussian_borders and gaussian_factors warnings and errors
gaus_bord=str(self.bordersEdit.text()).split(',')
for tal in gaus_bord:
if not self.is_number(tal):
QtGui.QMessageBox.critical(self, 'Message', "Gaussian borders must be real numbers!")
return None
elif float(tal)<0.0:
QtGui.QMessageBox.critical(self, 'Message', "Gaussian borders must be positive or zero!")
return None
if len(gaus_bord) < 2:
QtGui.QMessageBox.critical(self, 'Message', "You must enter at least 2 gaussian borders!")
return None
if not numpy.array_equal([numpy.float(i) for i in gaus_bord],numpy.sort([numpy.float(i) for i in gaus_bord])):
QtGui.QMessageBox.critical(self, 'Message', "The gaussian borders must be entered in the ascending order!")
return None
gaus_fact=str(self.factorsEdit.text()).split(',')
for tal in gaus_fact:
if not self.is_number(tal):
QtGui.QMessageBox.critical(self, 'Message', "Gaussian factors must be real numbers!")
return None
elif float(tal)<0.0:
QtGui.QMessageBox.critical(self, 'Message', "Gaussian factors must be positive or zero!")
return None
if len(gaus_fact) < 1:
QtGui.QMessageBox.critical(self, 'Message', "You must enter at least 1 gaussian factor!")
return None
if len(gaus_bord) != len(gaus_fact)+1:
QtGui.QMessageBox.critical(self, 'Message', "The number of gaussian factors is exactly one less than the number of gaussian borders!")
return None
## ignored data points warnings and errors
ign_pts=str(self.ignore_data_ptsEdit.text())
if not self.is_int(ign_pts):
QtGui.QMessageBox.critical(self, 'Message', "The number of ignored points is an integer!")
return None
elif int(ign_pts)<0:
QtGui.QMessageBox.critical(self, 'Message', "The number of ignored points is a positive integer!")
return None
## correction slit width warnings and errors
corr_pts=str(self.corr_slitEdit.text())
if not self.is_number(corr_pts):
QtGui.QMessageBox.critical(self, 'Message', "The correction slit width is a real number!")
return None
elif float(corr_pts)<0:
QtGui.QMessageBox.critical(self, 'Message', "The correction slit width is a positive number!")
return None
## fit_poly_ranges warnings and errors
if self.fit_poly_ranges_check==True:
polyfit_bord=str(self.poly_bordersEdit.text()).split(',')
for tal in polyfit_bord:
if not self.is_number(tal):
QtGui.QMessageBox.critical(self, 'Message', "The polyfit range enteries must be real numbers!")
return None
elif float(tal)<0.0:
QtGui.QMessageBox.critical(self, 'Message', "The polyfit range enteries must be positive or zero!")
return None
if len(polyfit_bord)<2 or len(polyfit_bord)%2!=0:
QtGui.QMessageBox.critical(self, 'Message', "The polyfit range list accepts minimum 2 or even number of enteries!")
return None
if not numpy.array_equal([numpy.float(i) for i in polyfit_bord],numpy.sort([numpy.float(i) for i in polyfit_bord])):
QtGui.QMessageBox.critical(self, 'Message', "The polyfit range list must be entered in ascending order!")
return None
# When all user defined enteries are approved save the data
self.set_save_config()
        if sender.text()=='Raw data':
if not self.loadSubOlis_check and not self.loadSubFilmOlis_check and not self.loadSubFTIR_check and not self.loadSubFilmFTIR_check:
QtGui.QMessageBox.critical(self, 'Message', "No raw data files selected!")
return None
        if sender.text()!='Raw data':
## raw data files warnings and errors
if not self.loadSubOlis_check and not self.loadSubFilmOlis_check:
pass
elif self.loadSubOlis_check and self.loadSubFilmOlis_check:
pass
else:
QtGui.QMessageBox.critical(self, 'Message', "Select both OLIS data files subfilmRAW and subRAW!")
return None
if not self.loadSubFTIR_check and not self.loadSubFilmFTIR_check:
pass
elif self.loadSubFTIR_check and self.loadSubFilmFTIR_check:
pass
else:
QtGui.QMessageBox.critical(self, 'Message', "Select both FTIR data files subfilmRAW and subRAW!")
return None
if not self.loadSubOlis_check and not self.loadSubFilmOlis_check and not self.loadSubFTIR_check and not self.loadSubFilmFTIR_check:
QtGui.QMessageBox.critical(self, 'Message', "No data files selected!")
return None
        if sender.text()=='Raw data':
self.button_style(self.Step0_Button,'red')
self.button_style(self.Step1_Button,'grey')
self.button_style(self.Step2_Button,'grey')
self.button_style(self.Step3_Button,'grey')
self.button_style(self.Step4_Button,'grey')
self.button_style(self.Step5_Button,'grey')
        elif sender.text()=='Tmin and Tmax':
self.button_style(self.Step1_Button,'red')
self.button_style(self.Step0_Button,'grey')
self.button_style(self.Step2_Button,'grey')
self.button_style(self.Step3_Button,'grey')
self.button_style(self.Step4_Button,'grey')
self.button_style(self.Step5_Button,'grey')
        elif sender.text()=='Std.Dev. in d':
self.button_style(self.Step2_Button,'red')
self.button_style(self.Step0_Button,'grey')
self.button_style(self.Step1_Button,'grey')
self.button_style(self.Step3_Button,'grey')
self.button_style(self.Step4_Button,'grey')
self.button_style(self.Step5_Button,'grey')
        elif sender.text()=='Index n':
self.button_style(self.Step3_Button,'red')
self.button_style(self.Step0_Button,'grey')
self.button_style(self.Step1_Button,'grey')
self.button_style(self.Step2_Button,'grey')
self.button_style(self.Step4_Button,'grey')
self.button_style(self.Step5_Button,'grey')
        elif sender.text()=='Absorption alpha':
self.button_style(self.Step4_Button,'red')
self.button_style(self.Step0_Button,'grey')
self.button_style(self.Step1_Button,'grey')
self.button_style(self.Step2_Button,'grey')
self.button_style(self.Step3_Button,'grey')
self.button_style(self.Step5_Button,'grey')
        elif sender.text()=='Wavenumber k':
self.button_style(self.Step5_Button,'red')
self.button_style(self.Step0_Button,'grey')
self.button_style(self.Step1_Button,'grey')
self.button_style(self.Step2_Button,'grey')
self.button_style(self.Step3_Button,'grey')
self.button_style(self.Step4_Button,'grey')
self.get_my_Thread=my_Thread(sender.text())
self.connect(self.get_my_Thread,SIGNAL("pass_plots(PyQt_PyObject,PyQt_PyObject)"),self.pass_plots)
self.connect(self.get_my_Thread,SIGNAL("excpt_common_xaxis()"),self.excpt_common_xaxis)
self.connect(self.get_my_Thread,SIGNAL("excpt_interpol()"),self.excpt_interpol)
self.connect(self.get_my_Thread,SIGNAL("excpt_squareroot()"),self.excpt_squareroot)
self.connect(self.get_my_Thread,SIGNAL('finished()'),self.set_finished)
self.get_my_Thread.start()
def excpt_common_xaxis(self):
QtGui.QMessageBox.critical(self, 'Message', "Tmin and Tmax curves have x values in different ranges, ie. no overlap is found. Inspect the raw data and adjust the gaussian borders and the gaussian factors!")
def excpt_interpol(self):
QtGui.QMessageBox.critical(self, 'Message', "Could not interpolate x_data for T_sub. Probably the x_data in Tr covers wider range than the x_data in T_sub.")
def excpt_squareroot(self):
QtGui.QMessageBox.critical(self, 'Message', "Can not take squareroot of negative numbers! The calculated refractive index n might not be physical.")
def pass_plots(self,my_obj,sender):
for tal in range(4):
self.NewFiles[tal].setText(''.join([str(tal+1),': ']))
try:
data_names=my_obj.make_plots()
for i,ii in zip(data_names,range(len(data_names))):
self.NewFiles[ii].setText(''.join([str(ii+1),': ',i]))
my_obj.show_plots()
except Exception as inst:
if "common_xaxis" in inst.args:
self.excpt_common_xaxis()
elif "interpol" in inst.args:
self.excpt_interpol()
elif "squareroot" in inst.args:
self.excpt_squareroot()
def set_save_config(self):
self.timestr=time.strftime("%y%m%d-%H%M")
self.lcd.display(self.timestr)
with open(self.config_file, 'w') as thefile:
# film+substrate measurements
thefile.write( ''.join(["loadSubOlis=[\"",self.loadSubOlis_str,"\",", str(self.loadSubOlis_check),"]\n"]))
thefile.write( ''.join(["loadSubFilmOlis=[\"",self.loadSubFilmOlis_str,"\",", str(self.loadSubFilmOlis_check),"]\n"]))
thefile.write( ''.join(["loadSubFTIR=[\"",self.loadSubFTIR_str,"\",", str(self.loadSubFTIR_check),"]\n"]))
thefile.write( ''.join(["loadSubFilmFTIR=[\"",self.loadSubFilmFTIR_str,"\",", str(self.loadSubFilmFTIR_check),"]\n"]))
thefile.write( ''.join(["fit_linear_spline=\"",self.fit_linear_spline,"\"\n"]))
thefile.write( ''.join(["gaussian_factors=[",str(self.factorsEdit.text()),"]\n"]))
thefile.write( ''.join(["gaussian_borders=[",str(self.bordersEdit.text()),"]\n"]))
thefile.write( ''.join(["ignore_data_pts=",str(self.ignore_data_ptsEdit.text()),"\n"]))
thefile.write( ''.join(["corr_slit=",str(self.corr_slitEdit.text()),"\n"]))
thefile.write( ''.join(["fit_poly_order=",str(self.fit_poly_order),"\n"]))
thefile.write( ''.join(["fit_poly_ranges=[[",str(self.poly_bordersEdit.text()),"],",str(self.fit_poly_ranges_check),"]\n"]))
thefile.write( ''.join(["filename=\"",str(self.filenameEdit.text()),"\"\n"]))
thefile.write( ''.join(["folder=\"",str(self.folderEdit.text()),"\"\n"]))
thefile.write( ''.join(["timestr=\"",self.timestr,"\"\n"]))
thefile.write( ''.join(["save_figs=",str(self.save_figs),"\n"]))
thefile.write( ''.join(["plot_X=\"",self.plot_X,"\""]))
imp.reload(self.cf)
def set_save_config_as(self,config_file):
with open(config_file, 'w') as thefile:
# film+substrate measurements
thefile.write( ''.join(["loadSubOlis=[\"",self.loadSubOlis_str,"\",", str(self.loadSubOlis_check),"]\n"]))
thefile.write( ''.join(["loadSubFilmOlis=[\"",self.loadSubFilmOlis_str,"\",", str(self.loadSubFilmOlis_check),"]\n"]))
thefile.write( ''.join(["loadSubFTIR=[\"",self.loadSubFTIR_str,"\",", str(self.loadSubFTIR_check),"]\n"]))
thefile.write( ''.join(["loadSubFilmFTIR=[\"",self.loadSubFilmFTIR_str,"\",", str(self.loadSubFilmFTIR_check),"]\n"]))
thefile.write( ''.join(["fit_linear_spline=\"",self.fit_linear_spline,"\"\n"]))
thefile.write( ''.join(["gaussian_factors=[",str(self.factorsEdit.text()),"]\n"]))
thefile.write( ''.join(["gaussian_borders=[",str(self.bordersEdit.text()),"]\n"]))
thefile.write( ''.join(["ignore_data_pts=",str(self.ignore_data_ptsEdit.text()),"\n"]))
thefile.write( ''.join(["corr_slit=",str(self.corr_slitEdit.text()),"\n"]))
thefile.write( ''.join(["fit_poly_order=",str(self.fit_poly_order),"\n"]))
thefile.write( ''.join(["fit_poly_ranges=[[",str(self.poly_bordersEdit.text()),"],",str(self.fit_poly_ranges_check),"]\n"]))
thefile.write( ''.join(["filename=\"",str(self.filenameEdit.text()),"\"\n"]))
thefile.write( ''.join(["folder=\"",str(self.folderEdit.text()),"\"\n"]))
thefile.write( ''.join(["timestr=\"",self.timestr,"\"\n"]))
thefile.write( ''.join(["save_figs=",str(self.save_figs),"\n"]))
thefile.write( ''.join(["plot_X=\"",self.plot_X,"\""]))
def set_load_config(self,tail):
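# Import the chosen config file as a module (tail is its file name, so
# tail[:-3] strips ".py") and copy its values onto the GUI state.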
try:
self.cf = __import__(tail[:-3])
self.loadSubOlis_str = self.cf.loadSubOlis[0]
self.loadSubFilmOlis_str = self.cf.loadSubFilmOlis[0]
self.loadSubFTIR_str = self.cf.loadSubFTIR[0]
self.loadSubFilmFTIR_str = self.cf.loadSubFilmFTIR[0]
self.loadSubOlis_check = self.cf.loadSubOlis[1]
self.loadSubFilmOlis_check = self.cf.loadSubFilmOlis[1]
self.loadSubFTIR_check = self.cf.loadSubFTIR[1]
self.loadSubFilmFTIR_check = self.cf.loadSubFilmFTIR[1]
self.fit_linear_spline=self.cf.fit_linear_spline
self.gaussian_factors=self.cf.gaussian_factors
self.gaussian_borders=self.cf.gaussian_borders
self.fit_poly_order=self.cf.fit_poly_order
self.ignore_data_pts=self.cf.ignore_data_pts
self.corr_slit=self.cf.corr_slit
self.fit_poly_ranges=self.cf.fit_poly_ranges[0]
self.fit_poly_ranges_check=self.cf.fit_poly_ranges[1]
self.filename_str=self.cf.filename
self.folder_str=self.cf.folder
self.timestr=self.cf.timestr
self.save_figs=self.cf.save_figs
self.plot_X=self.cf.plot_X
with open("config_Swanepoel.py", 'w') as thefile:
thefile.write( ''.join(["current_config_file=\"",self.config_file,"\""]))
imp.reload(config_Swanepoel)
self.set_field_vals()
except Exception as e:
QtGui.QMessageBox.critical(self, 'Message', "Could not load from the selected config file!")
def set_finished(self):
self.button_style(self.Step0_Button,'black')
self.button_style(self.Step1_Button,'black')
self.button_style(self.Step2_Button,'black')
self.button_style(self.Step3_Button,'black')
self.button_style(self.Step4_Button,'black')
self.button_style(self.Step5_Button,'black')
def allButtons_torf(self,trueorfalse):
self.cb_save_figs.setEnabled(trueorfalse)
self.Step0_Button.setEnabled(trueorfalse)
self.Step1_Button.setEnabled(trueorfalse)
self.Step2_Button.setEnabled(trueorfalse)
self.Step3_Button.setEnabled(trueorfalse)
self.Step4_Button.setEnabled(trueorfalse)
self.Step5_Button.setEnabled(trueorfalse)
self.combo1.setEnabled(trueorfalse)
self.combo2.setEnabled(trueorfalse)
self.combo4.setEnabled(trueorfalse)
self.factorsEdit.setEnabled(trueorfalse)
self.bordersEdit.setEnabled(trueorfalse)
self.ignore_data_ptsEdit.setEnabled(trueorfalse)
self.corr_slitEdit.setEnabled(trueorfalse)
self.poly_bordersEdit.setEnabled(trueorfalse)
self.filenameEdit.setEnabled(trueorfalse)
self.folderEdit.setEnabled(trueorfalse)
def closeEvent(self,event):
reply = QtGui.QMessageBox.question(self, 'Message', "Quit now?", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
elif reply == QtGui.QMessageBox.No:
event.ignore()
#########################################
#########################################
#########################################
def main():
app = QtGui.QApplication(sys.argv)
ex = Run_CM110()
sys.exit(app.exec_())
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
18930ec02640d88d9249a496421df84a048f1e75 | f1a3bd9ad5ef76204c24dc96f113c405ece21b6d | /main/migrations/0082_auto__add_field_profile_email_notifications__add_field_profile_email_n.py | 833ee2eb84ebdd15ca7fc46600cfbce256d7e61f | [] | no_license | JamesLinus/solidcomposer | 02f83c3731774e8008d46b418f3bf4fb5d9dab36 | ed75e576ce1c50487403437b5b537f9bfbb6397e | refs/heads/master | 2020-12-28T23:50:06.745329 | 2014-01-24T02:34:41 | 2014-01-24T02:34:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,625 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Profile.email_notifications'
db.add_column('main_profile', 'email_notifications', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True), keep_default=False)
# Adding field 'Profile.email_newsletter'
db.add_column('main_profile', 'email_newsletter', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Profile.email_notifications'
db.delete_column('main_profile', 'email_notifications')
# Deleting field 'Profile.email_newsletter'
db.delete_column('main_profile', 'email_newsletter')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'chat.chatroom': {
'Meta': {'object_name': 'ChatRoom'},
'blacklist': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blacklisted_users'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permission_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'whitelist': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'whitelisted_users'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"})
},
'competitions.competition': {
'Meta': {'object_name': 'Competition'},
'chat_room': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['chat.ChatRoom']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {}),
'have_listening_party': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'listening_party_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'listening_party_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'preview_rules': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'preview_theme': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'rules': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'submit_deadline': ('django.db.models.fields.DateTimeField', [], {}),
'theme': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'vote_deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'vote_period_length': ('django.db.models.fields.IntegerField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.accountplan': {
'Meta': {'object_name': 'AccountPlan'},
'band_count_limit': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'total_space': ('django.db.models.fields.BigIntegerField', [], {}),
'usd_per_month': ('django.db.models.fields.FloatField', [], {})
},
'main.band': {
'Meta': {'object_name': 'Band'},
'abandon_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'concurrent_editing': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'openness': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'total_space': ('django.db.models.fields.BigIntegerField', [], {}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '110', 'unique': 'True', 'null': 'True'}),
'used_space': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'main.bandmember': {
'Meta': {'object_name': 'BandMember'},
'band': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Band']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space_donated': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'main.profile': {
'Meta': {'object_name': 'Profile'},
'activate_code': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'activated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'assume_uploaded_plugins_owned': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'band_count_limit': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'competitions_bookmarked': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'competitions_bookmarked'", 'blank': 'True', 'to': "orm['competitions.Competition']"}),
'customer_id': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'date_activity': ('django.db.models.fields.DateTimeField', [], {}),
'email_newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'email_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.AccountPlan']", 'null': 'True', 'blank': 'True'}),
'plugins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'profile_plugins'", 'blank': 'True', 'to': "orm['workshop.PluginDepenency']"}),
'purchased_bytes': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'solo_band': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Band']"}),
'studios': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'profile_studios'", 'blank': 'True', 'to': "orm['workshop.Studio']"}),
'usd_per_month': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'main.song': {
'Meta': {'object_name': 'Song'},
'album': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'band': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Band']"}),
'comment_node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'song_comment_node'", 'null': 'True', 'to': "orm['main.SongCommentNode']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_open_for_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_open_source': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'length': ('django.db.models.fields.FloatField', [], {}),
'mp3_file': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'plugins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'song_plugins'", 'blank': 'True', 'to': "orm['workshop.PluginDepenency']"}),
'source_file': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'studio': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workshop.Studio']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'waveform_img': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'})
},
'main.songcommentnode': {
'Meta': {'object_name': 'SongCommentNode'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {}),
'date_edited': ('django.db.models.fields.DateTimeField', [], {}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.SongCommentNode']", 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'reply_disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'song': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Song']", 'null': 'True', 'blank': 'True'})
},
'main.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'main.tempfile': {
'Meta': {'object_name': 'TempFile'},
'death_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 8, 15, 5, 28, 52, 509297)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'workshop.plugindepenency': {
'Meta': {'object_name': 'PluginDepenency'},
'comes_with_studio': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workshop.Studio']", 'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plugin_type': ('django.db.models.fields.IntegerField', [], {}),
'price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'workshop.studio': {
'Meta': {'object_name': 'Studio'},
'canMerge': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'canReadFile': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'canRender': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'external_url': ('django.db.models.fields.CharField', [], {'max_length': '500', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'logo_16x16': ('django.db.models.fields.files.ImageField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'logo_large': ('django.db.models.fields.files.ImageField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['main']
| [
"[email protected]"
] | |
df7281a7926eb33f1778ef246c2fdeca5fbffa99 | aafc9140c662fcb2b36fb092cbf861d80e4da7e9 | /examples/misc/chained_callbacks.py | 87879ff42488aca6c81fd210a4a5cc2f14054791 | [] | no_license | alecordev/dashing | 12fb8d303143130f3351c8042615a0f7497f59cf | aac810147f8459834b6c693291b1276e8a84c36e | refs/heads/master | 2023-02-18T08:55:22.410205 | 2022-04-07T08:17:37 | 2022-04-07T08:17:37 | 99,436,393 | 0 | 0 | null | 2023-02-16T03:20:21 | 2017-08-05T17:01:29 | CSS | UTF-8 | Python | false | false | 1,473 | py | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
all_options = {
"America": ["New York City", "San Francisco", "Cincinnati"],
"Canada": ["Montréal", "Toronto", "Ottawa"],
}
app.layout = html.Div(
[
dcc.RadioItems(
id="countries-radio",
options=[{"label": k, "value": k} for k in all_options.keys()],
value="America",
),
html.Hr(),
dcc.RadioItems(id="cities-radio"),
html.Hr(),
html.Div(id="display-selected-values"),
]
)
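# Chained callbacks: picking a country rewrites the city options; the new
# options reset the selected city to the first entry; and either selection
# refreshes the display text below.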
@app.callback(Output("cities-radio", "options"), [Input("countries-radio", "value")])
def set_cities_options(selected_country):
return [{"label": i, "value": i} for i in all_options[selected_country]]
@app.callback(Output("cities-radio", "value"), [Input("cities-radio", "options")])
def set_cities_value(available_options):
return available_options[0]["value"]
@app.callback(
Output("display-selected-values", "children"),
[Input("countries-radio", "value"), Input("cities-radio", "value")],
)
def set_display_children(selected_country, selected_city):
return "{} is a city in {}".format(
selected_city,
selected_country,
)
if __name__ == "__main__":
app.run_server(debug=True)
| [
"[email protected]"
] | |
29a2cd8efc2aaa2e4516c00dfb1c4ee3a55e932d | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/fiftyPercent/rank_2j1k_T.py | 01f79cb0649b2afd54bbb504d7a26370bac53377 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,390 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define it as a function and call it in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop over the testFileList with a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus, instead of a function imported by other scripts, the calculations will run as individual jobs parallelized in the queue.
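# Pipeline sketch: load the trained classifier and its standard scaler, build
# pairwise difference descriptors for one complex, score every decoy against
# the rest, and write the resulting rank table to <pdbID><identifier>.csv.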
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2j1k.csv'
identifier = 'T'
coefFrac = 0.5
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/fiftyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/fiftyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# Subtract each row of the dataframe from row i, then drop the trivial row[[i]] - row[[i]] comparison. Some input files also carry a 'class' column; this is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
# Standardize inut DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"[email protected]"
] | |
3b08994748c30a31baf779c095991557e4427e44 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/rasbt_mlxtend/mlxtend-master/mlxtend/classifier/softmax_regression.py | 04e5d621bb0f443e834b5ed9ae559e12551abd2b | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 5,868 | py | # Sebastian Raschka 2014-2017
# mlxtend Machine Learning Library Extensions
#
# Implementation of the multinomial logistic regression algorithm for
# classification.
# Author: Sebastian Raschka <sebastianraschka.com>
#
# License: BSD 3 clause
import numpy as np
from time import time
from .._base import _BaseModel
from .._base import _IterativeModel
from .._base import _MultiClass
from .._base import _Classifier
class SoftmaxRegression(_BaseModel, _IterativeModel, _MultiClass, _Classifier):
"""Softmax regression classifier.
Parameters
------------
eta : float (default: 0.01)
Learning rate (between 0.0 and 1.0)
epochs : int (default: 50)
Passes over the training dataset.
Prior to each epoch, the dataset is shuffled
if `minibatches > 1` to prevent cycles in stochastic gradient descent.
l2 : float
Regularization parameter for L2 regularization.
No regularization if l2=0.0.
minibatches : int (default: 1)
The number of minibatches for gradient-based optimization.
If 1: Gradient Descent learning
If len(y): Stochastic Gradient Descent (SGD) online learning
If 1 < minibatches < len(y): SGD Minibatch learning
n_classes : int (default: None)
A positive integer to declare the number of class labels
if not all class labels are present in a partial training set.
Gets the number of class labels automatically if None.
random_seed : int (default: None)
Set random state for shuffling and initializing the weights.
print_progress : int (default: 0)
Prints progress in fitting to stderr.
0: No output
1: Epochs elapsed and cost
2: 1 plus time elapsed
3: 2 plus estimated time until completion
Attributes
-----------
w_ : 2d-array, shape={n_features, n_classes}
Model weights after fitting.
b_ : 1d-array, shape={n_classes,}
Bias unit after fitting.
cost_ : list
List of floats, the average cross_entropy for each epoch.
"""
def __init__(self, eta=0.01, epochs=50,
l2=0.0,
minibatches=1,
n_classes=None,
random_seed=None,
print_progress=0):
self.eta = eta
self.epochs = epochs
self.l2 = l2
self.minibatches = minibatches
self.n_classes = n_classes
self.random_seed = random_seed
self.print_progress = print_progress
self._is_fitted = False
def _net_input(self, X, W, b):
return (X.dot(W) + b)
def _softmax(self, z):
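# Subtract the row-wise max before exponentiating: softmax is invariant to a
# per-row shift, and this keeps exp() from overflowing for large logits.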
e_x = np.exp(z - z.max(axis=1, keepdims=True))
out = e_x / e_x.sum(axis=1, keepdims=True)
return out
# return (np.exp(z.T) / np.sum(np.exp(z), axis=1)).T
def _cross_entropy(self, output, y_target):
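# y_target is one-hot encoded, so log(output) * y_target keeps only the
# log-probability of the true class; summing over axis=1 yields the
# per-sample negative log-likelihood.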
return - np.sum(np.log(output) * (y_target), axis=1)
def _cost(self, cross_entropy):
L2_term = self.l2 * np.sum(self.w_ ** 2)
cross_entropy = cross_entropy + L2_term
return 0.5 * np.mean(cross_entropy)
def _to_classlabels(self, z):
return z.argmax(axis=1)
def _fit(self, X, y, init_params=True):
self._check_target_array(y)
if init_params:
if self.n_classes is None:
self.n_classes = np.max(y) + 1
self._n_features = X.shape[1]
self.b_, self.w_ = self._init_params(
weights_shape=(self._n_features, self.n_classes),
bias_shape=(self.n_classes,),
random_seed=self.random_seed)
self.cost_ = []
y_enc = self._one_hot(y=y, n_labels=self.n_classes, dtype=float)  # np.float is deprecated/removed in modern NumPy
self.init_time_ = time()
rgen = np.random.RandomState(self.random_seed)
for i in range(self.epochs):
for idx in self._yield_minibatches_idx(
rgen=rgen,
n_batches=self.minibatches,
data_ary=y,
shuffle=True):
# givens:
# w_ -> n_feat x n_classes
# b_ -> n_classes
# net_input, softmax and diff -> n_samples x n_classes:
net = self._net_input(X[idx], self.w_, self.b_)
softm = self._softmax(net)
diff = softm - y_enc[idx]
# gradient -> n_features x n_classes
grad = np.dot(X[idx].T, diff)
# update in opp. direction of the cost gradient
self.w_ -= (self.eta * grad +
self.eta * self.l2 * self.w_)
self.b_ -= (self.eta * np.sum(diff, axis=0))
# compute cost of the whole epoch
net = self._net_input(X, self.w_, self.b_)
softm = self._softmax(net)
cross_ent = self._cross_entropy(output=softm, y_target=y_enc)
cost = self._cost(cross_ent)
self.cost_.append(cost)
if self.print_progress:
self._print_progress(iteration=i + 1,
n_iter=self.epochs,
cost=cost)
return self
def predict_proba(self, X):
"""Predict class probabilities of X from the net input.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
Class probabilties : array-like, shape= [n_samples, n_classes]
"""
net = self._net_input(X, self.w_, self.b_)
softm = self._softmax(net)
return softm
def _predict(self, X):
probas = self.predict_proba(X)
return self._to_classlabels(probas)
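# Minimal usage sketch (illustrative toy data, not from the original source;
# assumes fit()/predict() are supplied by the _BaseModel/_Classifier mix-ins,
# which delegate to the _fit()/_predict() methods defined above):
#
# import numpy as np
# X = np.array([[0.0], [1.0], [2.0], [3.0]])
# y = np.array([0, 0, 1, 1])
# clf = SoftmaxRegression(eta=0.1, epochs=200, minibatches=1, random_seed=1)
# clf.fit(X, y)
# print(clf.predict(X))  # expected to recover [0, 0, 1, 1] on this separable toy set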
| [
"[email protected]"
] | |
748eb1b3110d4ce4036007555737afa714ca4d1e | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/rdbms/azure-mgmt-rdbms/generated_samples/mysql/virtual_network_rules_create_or_update.py | d18659e34af6cf458a211fb1e990c431312e142a | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 2,010 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.rdbms.mysql import MySQLManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-rdbms
# USAGE
python virtual_network_rules_create_or_update.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = MySQLManagementClient(
credential=DefaultAzureCredential(),
subscription_id="ffffffff-ffff-ffff-ffff-ffffffffffff",
)
response = client.virtual_network_rules.begin_create_or_update(
resource_group_name="TestGroup",
server_name="vnet-test-svr",
virtual_network_rule_name="vnet-firewall-rule",
parameters={
"properties": {
"ignoreMissingVnetServiceEndpoint": False,
"virtualNetworkSubnetId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/TestGroup/providers/Microsoft.Network/virtualNetworks/testvnet/subnets/testsubnet",
}
},
).result()
print(response)
# x-ms-original-file: specification/mysql/resource-manager/Microsoft.DBforMySQL/legacy/stable/2017-12-01/examples/VirtualNetworkRulesCreateOrUpdate.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
fb2a5ba96ca24f614cac37db2dbc94f81c00928d | e838076bc1c8aedbb8c77710b1a1a32efc3a4da1 | /site_selection/migrations/0002_siteselectionselectedsites.py | 6d1aaf6ccd16ac174ac7cf7e4c86b045fbcf5e69 | [] | no_license | abbasgis/ferrp | 5f2f7768f0e38e299498c2e74379311698b6321f | 77736c33e7ec82b6adf247a1bf30ccbc4897f02e | refs/heads/master | 2023-05-25T09:59:45.185025 | 2021-06-12T09:15:07 | 2021-06-12T09:15:07 | 376,236,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-11-15 20:28
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('site_selection', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SiteSelectionSelectedsites',
fields=[
('oid', models.AutoField(primary_key=True, serialize=False)),
('site_name', models.CharField(blank=True, max_length=256, null=True)),
('project_id', models.CharField(blank=True, max_length=256, null=True)),
('created_by', models.IntegerField(blank=True, null=True)),
('updated_by', models.IntegerField(blank=True, null=True)),
('created_at', models.DateTimeField(blank=True, null=True)),
('updated_at', models.DateTimeField(blank=True, null=True)),
('geom', django.contrib.gis.db.models.fields.GeometryField(blank=True, null=True, srid=3857)),
],
options={
'db_table': 'site_selection_selectedsites',
'managed': False,
},
),
]
| [
"abbas123@abc"
] | abbas123@abc |
0f478534f7fcad7d99d58f79b2fc2d2cc39d3729 | d2332604fc80b6d622a263b2af644425a7e703de | /fast-track/dynamic_programming/11_decode_ways.py | 24d39552909846b648b35486f8055c00aeb4d3b3 | [] | no_license | abhijitdey/coding-practice | b3b83a237c1930266768ce38500d6812fc31c529 | 6ae2a565042bf1d6633cd98ed774e4a77f492cc8 | refs/heads/main | 2023-08-14T23:31:06.090613 | 2021-10-18T21:35:56 | 2021-10-18T21:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | """
A message containing letters from A-Z can be encoded into numbers using the following mapping:
'A' -> "1"
'B' -> "2"
...
'Z' -> "26"
To decode an encoded message, all the digits must be grouped then mapped back into letters using the reverse of the mapping above (there may be multiple ways).
For example, "11106" can be mapped into:
"AAJF" with the grouping (1 1 10 6)
"KJF" with the grouping (11 10 6)
Note that the grouping (1 11 06) is invalid because "06" cannot be mapped into 'F' since "6" is different from "06".
Given a string s containing only digits, return the number of ways to decode it.
Range of any letter: 1-26
"""
def decode_ways(s, dp, n):
if len(s[n - 1 :]) == 0:
return 1
if s[n - 1] == "0":
return 0
if len(s[n - 1 :]) == 1:
return 1
if dp[n] is not None:
return dp[n]
if int(s[n - 1]) <= 2 and int(s[n - 1 : n + 1]) <= 26:
# Two ways to decode
dp[n] = decode_ways(s, dp, n + 1) + decode_ways(s, dp, n + 2)
else:
# Only one way to decode
dp[n] = decode_ways(s, dp, n + 1)
return dp[n]
if __name__ == "__main__":
s = "226"
dp = [None] * (len(s) + 1)
dp[0] = 1
print(decode_ways(s, dp, n=1))
| [
"[email protected]"
] | |
a2e126193720517843439923118b13b875d7f842 | bd2a3d466869e0f8cb72075db7daec6c09bbbda1 | /sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/models/_paged_models.py | fdec95712a6365532286786ba2a82a0e79c2e307 | [
"MIT"
] | permissive | samvaity/azure-sdk-for-python | 7e8dcb2d3602d81e04c95e28306d3e2e7d33b03d | f2b072688d3dc688fed3905c558cff1fa0849b91 | refs/heads/master | 2021-08-11T21:14:29.433269 | 2019-07-19T17:40:10 | 2019-07-19T17:40:10 | 179,733,339 | 0 | 1 | MIT | 2019-04-05T18:17:43 | 2019-04-05T18:17:42 | null | UTF-8 | Python | false | false | 3,607 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class RegistryPaged(Paged):
"""
A paging container for iterating over a list of :class:`Registry <azure.mgmt.containerregistry.v2019_06_01_preview.models.Registry>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Registry]'}
}
def __init__(self, *args, **kwargs):
super(RegistryPaged, self).__init__(*args, **kwargs)
class OperationDefinitionPaged(Paged):
"""
A paging container for iterating over a list of :class:`OperationDefinition <azure.mgmt.containerregistry.v2019_06_01_preview.models.OperationDefinition>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[OperationDefinition]'}
}
def __init__(self, *args, **kwargs):
super(OperationDefinitionPaged, self).__init__(*args, **kwargs)
class ReplicationPaged(Paged):
"""
A paging container for iterating over a list of :class:`Replication <azure.mgmt.containerregistry.v2019_06_01_preview.models.Replication>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Replication]'}
}
def __init__(self, *args, **kwargs):
super(ReplicationPaged, self).__init__(*args, **kwargs)
class WebhookPaged(Paged):
"""
A paging container for iterating over a list of :class:`Webhook <azure.mgmt.containerregistry.v2019_06_01_preview.models.Webhook>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Webhook]'}
}
def __init__(self, *args, **kwargs):
super(WebhookPaged, self).__init__(*args, **kwargs)
class EventPaged(Paged):
"""
A paging container for iterating over a list of :class:`Event <azure.mgmt.containerregistry.v2019_06_01_preview.models.Event>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Event]'}
}
def __init__(self, *args, **kwargs):
super(EventPaged, self).__init__(*args, **kwargs)
class RunPaged(Paged):
"""
A paging container for iterating over a list of :class:`Run <azure.mgmt.containerregistry.v2019_06_01_preview.models.Run>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Run]'}
}
def __init__(self, *args, **kwargs):
super(RunPaged, self).__init__(*args, **kwargs)
class TaskPaged(Paged):
"""
A paging container for iterating over a list of :class:`Task <azure.mgmt.containerregistry.v2019_06_01_preview.models.Task>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Task]'}
}
def __init__(self, *args, **kwargs):
super(TaskPaged, self).__init__(*args, **kwargs)
| [
"[email protected]"
] | |
3bb3f0f26c82d632406baf4da93d54a98e633d87 | 474e74c654916d0a1b0311fc80eff206968539b1 | /venv/Lib/site-packages/asposewordscloud/models/graphics_quality_options_data.py | a079c87bbc4c8d4978856d861c26fe5f9f3dd00c | [] | no_license | viktor-tchemodanov/Training_Tasks_Python_Cloud | 4592cf61c2f017b314a009c135340b18fa23fc8f | b7e6afab4e9b76bc817ef216f12d2088447bd4cd | refs/heads/master | 2020-09-04T10:39:23.023363 | 2019-11-05T10:36:45 | 2019-11-05T10:36:45 | 219,712,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,947 | py | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="GraphicsQualityOptionsData.py">
# Copyright (c) 2018 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class GraphicsQualityOptionsData(object):
"""Allows to specify additional System.Drawing.Graphics quality options.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'compositing_mode': 'str',
'compositing_quality': 'str',
'interpolation_mode': 'str',
'smoothing_mode': 'str',
'string_format': 'StringFormatData',
'text_rendering_hint': 'str'
}
attribute_map = {
'compositing_mode': 'CompositingMode',
'compositing_quality': 'CompositingQuality',
'interpolation_mode': 'InterpolationMode',
'smoothing_mode': 'SmoothingMode',
'string_format': 'StringFormat',
'text_rendering_hint': 'TextRenderingHint'
}
def __init__(self, compositing_mode=None, compositing_quality=None, interpolation_mode=None, smoothing_mode=None, string_format=None, text_rendering_hint=None): # noqa: E501
"""GraphicsQualityOptionsData - a model defined in Swagger""" # noqa: E501
self._compositing_mode = None
self._compositing_quality = None
self._interpolation_mode = None
self._smoothing_mode = None
self._string_format = None
self._text_rendering_hint = None
self.discriminator = None
if compositing_mode is not None:
self.compositing_mode = compositing_mode
if compositing_quality is not None:
self.compositing_quality = compositing_quality
if interpolation_mode is not None:
self.interpolation_mode = interpolation_mode
if smoothing_mode is not None:
self.smoothing_mode = smoothing_mode
if string_format is not None:
self.string_format = string_format
if text_rendering_hint is not None:
self.text_rendering_hint = text_rendering_hint
@property
def compositing_mode(self):
"""Gets the compositing_mode of this GraphicsQualityOptionsData. # noqa: E501
Gets or sets a value that specifies how composited images are drawn to this Graphics. # noqa: E501
:return: The compositing_mode of this GraphicsQualityOptionsData. # noqa: E501
:rtype: str
"""
return self._compositing_mode
@compositing_mode.setter
def compositing_mode(self, compositing_mode):
"""Sets the compositing_mode of this GraphicsQualityOptionsData.
Gets or sets a value that specifies how composited images are drawn to this Graphics. # noqa: E501
:param compositing_mode: The compositing_mode of this GraphicsQualityOptionsData. # noqa: E501
:type: str
"""
allowed_values = ["SourceOver", "SourceCopy"] # noqa: E501
if not compositing_mode.isdigit():
if compositing_mode not in allowed_values:
raise ValueError(
"Invalid value for `compositing_mode` ({0}), must be one of {1}" # noqa: E501
.format(compositing_mode, allowed_values))
self._compositing_mode = compositing_mode
else:
self._compositing_mode = allowed_values[int(compositing_mode) if six.PY3 else long(compositing_mode)]
@property
def compositing_quality(self):
"""Gets the compositing_quality of this GraphicsQualityOptionsData. # noqa: E501
Gets or sets the rendering quality of composited images drawn to this Graphics. # noqa: E501
:return: The compositing_quality of this GraphicsQualityOptionsData. # noqa: E501
:rtype: str
"""
return self._compositing_quality
@compositing_quality.setter
def compositing_quality(self, compositing_quality):
"""Sets the compositing_quality of this GraphicsQualityOptionsData.
Gets or sets the rendering quality of composited images drawn to this Graphics. # noqa: E501
:param compositing_quality: The compositing_quality of this GraphicsQualityOptionsData. # noqa: E501
:type: str
"""
allowed_values = ["Default", "HighSpeed", "HighQuality", "GammaCorrected", "AssumeLinear", "Invalid"] # noqa: E501
if not compositing_quality.isdigit():
if compositing_quality not in allowed_values:
raise ValueError(
"Invalid value for `compositing_quality` ({0}), must be one of {1}" # noqa: E501
.format(compositing_quality, allowed_values))
self._compositing_quality = compositing_quality
else:
self._compositing_quality = allowed_values[int(compositing_quality) if six.PY3 else long(compositing_quality)]
@property
def interpolation_mode(self):
"""Gets the interpolation_mode of this GraphicsQualityOptionsData. # noqa: E501
Gets or sets the interpolation mode associated with this Graphics. # noqa: E501
:return: The interpolation_mode of this GraphicsQualityOptionsData. # noqa: E501
:rtype: str
"""
return self._interpolation_mode
@interpolation_mode.setter
def interpolation_mode(self, interpolation_mode):
"""Sets the interpolation_mode of this GraphicsQualityOptionsData.
Gets or sets the interpolation mode associated with this Graphics. # noqa: E501
:param interpolation_mode: The interpolation_mode of this GraphicsQualityOptionsData. # noqa: E501
:type: str
"""
allowed_values = ["Default", "Low", "High", "Bilinear", "Bicubic", "NearestNeighbor", "HighQualityBilinear", "HighQualityBicubic", "Invalid"] # noqa: E501
if not interpolation_mode.isdigit():
if interpolation_mode not in allowed_values:
raise ValueError(
"Invalid value for `interpolation_mode` ({0}), must be one of {1}" # noqa: E501
.format(interpolation_mode, allowed_values))
self._interpolation_mode = interpolation_mode
else:
self._interpolation_mode = allowed_values[int(interpolation_mode) if six.PY3 else long(interpolation_mode)]
@property
def smoothing_mode(self):
"""Gets the smoothing_mode of this GraphicsQualityOptionsData. # noqa: E501
Gets or sets the rendering quality for this Graphics. # noqa: E501
:return: The smoothing_mode of this GraphicsQualityOptionsData. # noqa: E501
:rtype: str
"""
return self._smoothing_mode
@smoothing_mode.setter
def smoothing_mode(self, smoothing_mode):
"""Sets the smoothing_mode of this GraphicsQualityOptionsData.
Gets or sets the rendering quality for this Graphics. # noqa: E501
:param smoothing_mode: The smoothing_mode of this GraphicsQualityOptionsData. # noqa: E501
:type: str
"""
allowed_values = ["Default", "HighSpeed", "HighQuality", "None", "AntiAlias", "Invalid"] # noqa: E501
if not smoothing_mode.isdigit():
if smoothing_mode not in allowed_values:
raise ValueError(
"Invalid value for `smoothing_mode` ({0}), must be one of {1}" # noqa: E501
.format(smoothing_mode, allowed_values))
self._smoothing_mode = smoothing_mode
else:
self._smoothing_mode = allowed_values[int(smoothing_mode) if six.PY3 else long(smoothing_mode)]
@property
def string_format(self):
"""Gets the string_format of this GraphicsQualityOptionsData. # noqa: E501
Gets or sets text layout information (such as alignment, orientation and tab stops) display manipulations (such as ellipsis insertion and national digit substitution) and OpenType features. # noqa: E501
:return: The string_format of this GraphicsQualityOptionsData. # noqa: E501
:rtype: StringFormatData
"""
return self._string_format
@string_format.setter
def string_format(self, string_format):
"""Sets the string_format of this GraphicsQualityOptionsData.
Gets or sets text layout information (such as alignment, orientation and tab stops) display manipulations (such as ellipsis insertion and national digit substitution) and OpenType features. # noqa: E501
:param string_format: The string_format of this GraphicsQualityOptionsData. # noqa: E501
:type: StringFormatData
"""
self._string_format = string_format
@property
def text_rendering_hint(self):
"""Gets the text_rendering_hint of this GraphicsQualityOptionsData. # noqa: E501
Gets or sets the rendering mode for text associated with this Graphics. # noqa: E501
:return: The text_rendering_hint of this GraphicsQualityOptionsData. # noqa: E501
:rtype: str
"""
return self._text_rendering_hint
@text_rendering_hint.setter
def text_rendering_hint(self, text_rendering_hint):
"""Sets the text_rendering_hint of this GraphicsQualityOptionsData.
Gets or sets the rendering mode for text associated with this Graphics. # noqa: E501
:param text_rendering_hint: The text_rendering_hint of this GraphicsQualityOptionsData. # noqa: E501
:type: str
"""
allowed_values = ["SystemDefault", "SingleBitPerPixelGridFit", "SingleBitPerPixel", "AntiAliasGridFit", "AntiAlias", "ClearTypeGridFit"] # noqa: E501
if not text_rendering_hint.isdigit():
if text_rendering_hint not in allowed_values:
raise ValueError(
"Invalid value for `text_rendering_hint` ({0}), must be one of {1}" # noqa: E501
.format(text_rendering_hint, allowed_values))
self._text_rendering_hint = text_rendering_hint
else:
self._text_rendering_hint = allowed_values[int(text_rendering_hint) if six.PY3 else long(text_rendering_hint)]
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GraphicsQualityOptionsData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
ece90d1b27b7bda334a307b0a1726b78af015b34 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_20866.py | 43b89f649be556f519aaca98d7c1a6b0b17da9d8 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # Python: Splitting a string into elements and adding them in a list
foo = '"MARY","PATRICIA","LINDA","BARBARA","ELIZABETH","JENNIFER","MARIA","SUSAN","MARGARET","DOROTHY","LISA","NANCY","KAREN","BETTY","HELEN","SANDRA","DONNA","CAROL"'
output = foo.replace('"','').split(",")
| [
"[email protected]"
] | |
c52a4a969c82465af49bfbd1a29225e9aec50a10 | 4ed038a638725ac77731b0b97ddd61aa37dd8d89 | /cairis/mio/GoalsContentHandler.py | b44493b44231270bdbf5c2611db7bf07b9c58cee | [
"Apache-2.0"
] | permissive | RachelLar/cairis_update | 0b784101c4aff81ff0390328eb615e335301daa2 | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | refs/heads/master | 2021-01-19T06:25:47.644993 | 2016-07-11T20:48:11 | 2016-07-11T20:48:11 | 63,103,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,306 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from xml.sax.handler import ContentHandler,EntityResolver
from cairis.core.DomainPropertyParameters import DomainPropertyParameters
from cairis.core.GoalParameters import GoalParameters
from cairis.core.ObstacleParameters import ObstacleParameters
from cairis.core.CountermeasureParameters import CountermeasureParameters
from cairis.core.GoalEnvironmentProperties import GoalEnvironmentProperties
from cairis.core.ObstacleEnvironmentProperties import ObstacleEnvironmentProperties
from cairis.core.CountermeasureEnvironmentProperties import CountermeasureEnvironmentProperties
from cairis.core.Target import Target
import cairis.core.RequirementFactory
from cairis.core.Borg import Borg
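# The helpers below normalize attribute encodings used in the goals XML/DTD:
# a2s maps 'a' multiplicities to '*', a2i maps Low/Medium/High labels to
# 1/2/3 (0 otherwise), and u2s restores spaces from underscored values.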
def a2s(aStr):
if aStr == 'a':
return '*'
elif aStr == '1..a':
return '1..*'
else:
return aStr
def a2i(spLabel):
if spLabel == 'Low':
return 1
elif spLabel == 'Medium':
return 2
elif spLabel == 'High':
return 3
else:
return 0
def u2s(aStr):
outStr = ''
for c in aStr:
if (c == '_'):
outStr += ' '
else:
outStr += c
return outStr
class GoalsContentHandler(ContentHandler,EntityResolver):
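# SAX content handler for goals XML: element attributes and text are staged in
# instance fields and assembled into *Parameters objects (domain properties,
# goals, obstacles, requirements, countermeasures) when each element closes.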
def __init__(self,session_id = None):
b = Borg()
self.dbProxy = b.get_dbproxy(session_id)
self.configDir = b.configDir
self.theDomainProperties = []
self.theGoals = []
self.theObstacles = []
self.theRequirements = []
self.theCountermeasures = []
self.resetDomainPropertyAttributes()
self.resetGoalAttributes()
self.resetObstacleAttributes()
self.resetRequirementAttributes()
self.resetGoalAttributes()
self.resetCountermeasureAttributes()
def resolveEntity(self,publicId,systemId):
return self.configDir + '/goals.dtd'
def roles(self):
return self.theRoles
def domainProperties(self):
return self.theDomainProperties
def goals(self):
return self.theGoals
def obstacles(self):
return self.theObstacles
def requirements(self):
return self.theRequirements
def countermeasures(self):
return self.theCountermeasures
def resetDomainPropertyAttributes(self):
self.theName = ''
self.theTags = []
self.theType = ''
self.theDescription = ''
self.theOriginator = ''
def resetGoalAttributes(self):
self.theName = ''
self.theTags = []
self.theOriginator = ''
self.theEnvironmentProperties = []
self.resetGoalEnvironmentAttributes()
def resetObstacleAttributes(self):
self.theName = ''
self.theTags = []
self.theOriginator = ''
self.theEnvironmentProperties = []
self.resetObstacleEnvironmentAttributes()
def resetGoalEnvironmentAttributes(self):
self.inDescription = 0
self.inFitCriterion = 0
self.inIssue = 0
self.theEnvironmentName = ''
self.theCategory = ''
self.thePriority = ''
self.theDescription = ''
self.theConcerns = []
self.theConcernAssociations = []
def resetObstacleEnvironmentAttributes(self):
self.inDescription = 0
self.theEnvironmentName = ''
self.theCategory = ''
self.theDescription = ''
self.theConcerns = []
def resetRequirementAttributes(self):
self.inDescription = 0
self.inRationale = 0
self.inFitCriterion = 0
self.inOriginator = 0
self.theReference = ''
self.theReferenceType = ''
self.theLabel = 0
self.theName = ''
self.theType = ''
self.thePriority = 0
self.theDescription = 0
self.theRationale = 0
self.theFitCriterion = 0
self.theOriginator = 0
def resetCountermeasureAttributes(self):
self.theName = ''
self.theType = ''
self.inDescription = 0
self.theDescription = ''
self.theEnvironmentProperties = []
self.resetCountermeasureEnvironmentAttributes()
def resetCountermeasureEnvironmentAttributes(self):
self.theEnvironmentName = ''
self.theCost = ''
self.theCmRequirements = []
self.theTargets = []
self.theCmRoles = []
self.theTaskPersonas = []
self.theSpDict = {}
self.theSpDict['confidentiality'] = (0,'None')
self.theSpDict['integrity'] = (0,'None')
self.theSpDict['availability'] = (0,'None')
self.theSpDict['accountability'] = (0,'None')
self.theSpDict['anonymity'] = (0,'None')
self.theSpDict['pseudonymity'] = (0,'None')
self.theSpDict['unlinkability'] = (0,'None')
self.theSpDict['unobservability'] = (0,'None')
self.theTargetName = ''
self.theTargetEffectiveness = ''
self.theTargetResponses = []
self.resetMitigatingPropertyAttributes()
def resetMitigatingPropertyAttributes(self):
self.thePropertyName = ''
self.thePropertyValue = 'None'
self.inRationale = 0
self.theRationale = ''
def startElement(self,name,attrs):
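# Stage attribute values keyed by element name; the matching endElement
# handler turns the staged state into parameter objects.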
self.currentElementName = name
if name == 'domainproperty':
self.theName = attrs['name']
self.theType = attrs['type']
self.theOriginator = attrs['originator']
elif name == 'goal':
self.theName = attrs['name']
self.theOriginator = attrs['originator']
elif name == 'obstacle':
self.theName = attrs['name']
self.theOriginator = attrs['originator']
elif name == 'goal_environment':
self.theEnvironmentName = attrs['name']
self.theCategory = attrs['category']
self.thePriority = attrs['priority']
elif name == 'obstacle_environment':
self.theEnvironmentName = attrs['name']
self.theCategory = u2s(attrs['category'])
elif name == 'concern':
self.theConcerns.append(attrs['name'])
elif name == 'concern_association':
self.theConcernAssociations.append((attrs['source_name'],a2s(attrs['source_nry']),attrs['link_name'],attrs['target_name'],a2s(attrs['target_nry'])))
elif name == 'requirement':
self.theReference = attrs['reference']
try:
self.theName = attrs['name']
except KeyError:
self.theName = ''
self.theReferenceType = attrs['reference_type']
self.theLabel = attrs['label']
self.theType = u2s(attrs['type'])
self.thePriority = attrs['priority']
elif name == 'countermeasure':
self.theName = attrs['name']
self.theType = attrs['type']
elif name == 'countermeasure_environment':
self.theEnvironmentName = attrs['name']
self.theCost = attrs['cost']
elif name == 'countermeasure_requirement':
self.theCmRequirements.append(attrs['name'])
elif name == 'target':
self.theTargetName = attrs['name']
self.theTargetEffectiveness = attrs['effectiveness']
elif name == 'target_response':
self.theTargetResponses.append(attrs['name'])
elif name == 'mitigating_property':
self.thePropertyName = attrs['name']
self.thePropertyValue = a2i(attrs['value'])
elif name == 'responsible_role':
self.theCmRoles.append(attrs['name'])
elif name == 'responsible_persona':
self.theTaskPersonas.append((attrs['task'],attrs['persona'],u2s(attrs['duration']),u2s(attrs['frequency']),u2s(attrs['demands']),u2s(attrs['goals'])))
elif (name == 'description'):
self.inDescription = 1
self.theDescription = ''
elif (name =='definition'):
self.inDescription = 1
self.theDescription = ''
elif name == 'fit_criterion':
self.inFitCriterion = 1
self.theFitCriterion = ''
elif name == 'issue':
self.inIssue = 1
self.theIssue = ''
elif name == 'rationale':
self.inRationale = 1
self.theRationale = ''
elif name == 'originator':
self.inOriginator = 1
self.theOriginator = ''
elif name == 'tag':
self.theTags.append(attrs['name'])
def characters(self,data):
if self.inDescription:
self.theDescription += data
elif self.inFitCriterion:
self.theFitCriterion += data
elif self.inIssue:
self.theIssue += data
elif self.inRationale:
self.theRationale += data
elif self.inOriginator:
self.theOriginator += data
def endElement(self,name):
if name == 'domainproperty':
p = DomainPropertyParameters(self.theName,self.theDescription,self.theType,self.theOriginator,self.theTags)
self.theDomainProperties.append(p)
self.resetDomainPropertyAttributes()
elif name == 'goal_environment':
p = GoalEnvironmentProperties(self.theEnvironmentName,'',self.theDescription,self.theCategory,self.thePriority,self.theFitCriterion,self.theIssue,[],[],self.theConcerns,self.theConcernAssociations)
self.theEnvironmentProperties.append(p)
self.resetGoalEnvironmentAttributes()
elif name == 'obstacle_environment':
p = ObstacleEnvironmentProperties(self.theEnvironmentName,'',self.theDescription,self.theCategory,[],[],self.theConcerns)
self.theEnvironmentProperties.append(p)
self.resetObstacleEnvironmentAttributes()
elif name == 'goal':
p = GoalParameters(self.theName,self.theOriginator,self.theTags,self.theEnvironmentProperties)
self.theGoals.append(p)
self.resetGoalAttributes()
elif name == 'obstacle':
p = ObstacleParameters(self.theName,self.theOriginator,self.theTags,self.theEnvironmentProperties)
self.theObstacles.append(p)
self.resetObstacleAttributes()
elif name == 'requirement':
reqId = self.dbProxy.newId()
r = cairis.core.RequirementFactory.build(reqId,self.theLabel,self.theName,self.theDescription,self.thePriority,self.theRationale,self.theFitCriterion,self.theOriginator,self.theType,self.theReference)
self.theRequirements.append((r,self.theReference,self.theReferenceType))
self.resetRequirementAttributes()
elif name == 'countermeasure':
p = CountermeasureParameters(self.theName,self.theDescription,self.theType,self.theTags,self.theEnvironmentProperties)
self.theCountermeasures.append(p)
self.resetCountermeasureAttributes()
elif name == 'mitigating_property':
self.theSpDict[self.thePropertyName] = (self.thePropertyValue,self.theDescription)
self.resetMitigatingPropertyAttributes()
elif name == 'countermeasure_environment':
cProperty,cRationale = self.theSpDict['confidentiality']
iProperty,iRationale = self.theSpDict['integrity']
avProperty,avRationale = self.theSpDict['availability']
acProperty,acRationale = self.theSpDict['accountability']
anProperty,anRationale = self.theSpDict['anonymity']
panProperty,panRationale = self.theSpDict['pseudonymity']
unlProperty,unlRationale = self.theSpDict['unlinkability']
unoProperty,unoRationale = self.theSpDict['unobservability']
p = CountermeasureEnvironmentProperties(self.theEnvironmentName,self.theCmRequirements,self.theTargets,[cProperty,iProperty,avProperty,acProperty,anProperty,panProperty,unlProperty,unoProperty],[cRationale,iRationale,avRationale,acRationale,anRationale,panRationale,unlRationale,unoRationale],self.theCost,self.theCmRoles,self.theTaskPersonas)
self.theEnvironmentProperties.append(p)
self.resetCountermeasureEnvironmentAttributes()
elif (name == 'target'):
self.theTargets.append(Target(self.theTargetName,self.theTargetEffectiveness,self.theRationale))
self.theTargetResponses = []
elif (name == 'description'):
self.inDescription = 0
elif (name =='definition'):
self.inDescription = 0
elif name == 'fit_criterion':
self.inFitCriterion = 0
elif name == 'issue':
self.inIssue = 0
elif name == 'rationale':
self.inRationale = 0
elif name == 'originator':
self.inOriginator = 0
| [
"[email protected]"
] | |
0efc657b8dcb8e6b318ea4ca6e2a6c04543e1dbd | 891902687207fb335b65dbb8d31d6e20301764f9 | /pe048.py | bc475ea81eba7af44d87a0dfa5b0a74bcdc8ceb0 | [] | no_license | maecchi/PE | 93bd050eaca2733aa37db6ca493b820fe3d7a351 | 3d9092635807f0036719b65adb16f1c0926c2321 | refs/heads/master | 2020-05-04T16:38:36.476355 | 2012-06-10T05:26:10 | 2012-06-10T05:26:10 | 1,746,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# pe048.py - Project Euler
#
LIMIT = 1000
series = [pow(x,x) for x in xrange(1, LIMIT+1)]
total = sum(series)
total_str = str(total)
ten_digit_str = total_str[-10:]
print ten_digit_str
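# A lighter-weight variant (sketch): keep only the last ten digits throughout
# via three-argument pow. Note it prints an integer, so leading zeros in the
# ten-digit window would be dropped, unlike the string slice above.
print sum(pow(x, x, 10 ** 10) for x in xrange(1, LIMIT + 1)) % 10 ** 10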
| [
"[email protected]"
] | |
6a51e12f7a32aaa10eff1954b31dffd2d63024dd | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/193/usersdata/274/70731/submittedfiles/al7.py | 1054fb07641b98114c1cfba9aaba25c980ae4b02 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | # -*- coding: utf-8 -*-
n = int(input("Valor de n: "))
i=1
s=0
while (i<n):
if (n%i)==0:
s = s + i
i=i+1
print(s)
if s==n:
print("PERFEITO")
else:
print("NÃO PERFEITO") | [
"[email protected]"
] | |
a5e3c2dd3665157ca080d0fc9762c4e20c48c388 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-2/7cf0626d7b9176f0eba3ff83c69c5b4553ae3f7e-<validate_distribution_from_caller_reference>-fix.py | b639b22fa205318f577ce5de14a54a2382c3197b | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py |
def validate_distribution_from_caller_reference(self, caller_reference):
try:
distributions = self.__cloudfront_facts_mgr.list_distributions(False)
distribution_name = 'Distribution'
distribution_config_name = 'DistributionConfig'
distribution_ids = [dist.get('Id') for dist in distributions]
for distribution_id in distribution_ids:
distribution = self.__cloudfront_facts_mgr.get_distribution(distribution_id)
if (distribution is not None):
distribution_config = distribution[distribution_name].get(distribution_config_name)
if ((distribution_config is not None) and (distribution_config.get('CallerReference') == caller_reference)):
distribution[distribution_name][distribution_config_name] = distribution_config
return distribution
except Exception as e:
self.module.fail_json_aws(e, msg='Error validating distribution from caller reference')
| [
"[email protected]"
] | |
7b46f5761fbed7cb98152ac3384dc472e21fbcc6 | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /edgelm/fairseq/file_io.py | dd2865cd448fe581b22d069b32f12c045efc8c1f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 5,806 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import shutil
from typing import List, Optional
logger = logging.getLogger(__file__)
try:
from iopath.common.file_io import g_pathmgr as IOPathManager
try:
# [FB only - for now] AWS PathHandler for PathManager
from .fb_pathhandlers import S3PathHandler
IOPathManager.register_handler(S3PathHandler())
except KeyError:
logging.warning("S3PathHandler already registered.")
except ImportError:
logging.debug(
"S3PathHandler couldn't be imported. Either missing fb-only files, or boto3 module."
)
except ImportError:
IOPathManager = None
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
@staticmethod
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
@staticmethod
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
@staticmethod
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
@staticmethod
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
@staticmethod
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
@staticmethod
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
@staticmethod
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
@staticmethod
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
@staticmethod
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
@staticmethod
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
@staticmethod
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
@staticmethod
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
@staticmethod
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
@staticmethod
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
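# Minimal usage sketch (illustrative; the path below is hypothetical):
#
#   with PathManager.open("/tmp/example.txt", "w") as f:
#       f.write("hello")
#
# When iopath is importable the call is routed through its global PathManager
# (so URI schemes with registered handlers work); otherwise it falls back to
# the builtin open().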
| [
"tage@sandbox12.t0ekrjpotp2uhbmhwy0wiwkeya.xx.internal.cloudapp.net"
] | tage@sandbox12.t0ekrjpotp2uhbmhwy0wiwkeya.xx.internal.cloudapp.net |
3fb2e07f62201caffa8b67a78f4e24fe0fe44f69 | 0d178d54334ddb7d669d212b11dd23ef5607cf8e | /LeetCode/Array/4Sum.py | 11f7a1109fd6bbfb0bdb4c287a979f1a7fa60b2f | [] | no_license | mrunalhirve12/Python_CTCI-practise | 2851d2c61fd59c76d047bd63bd591849c0781dda | f41348fd7da3b7af9f9b2df7c01457c7bed8ce0c | refs/heads/master | 2020-04-17T11:09:29.213922 | 2019-09-28T02:36:24 | 2019-09-28T02:36:24 | 166,529,867 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | """
Given an array nums of n integers and an integer target, are there elements a, b, c, and d in nums such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
Note:
The solution set must not contain duplicate quadruplets.
Example:
Given array nums = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
"""
class Solution(object):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
# idea to use same technique of incrementing and decrementing pointers
a = sorted(nums)
res = set()
n = len(a)
for i in range(0, n-3):
for j in range(i+1, n-2):
rem = target - (a[i] + a[j])
left, right = j+1, n-1
while left < right:
if a[left] + a[right] == rem:
# to add tuple to res
res.add(tuple([a[i], a[j], a[left], a[right]]))
left = left + 1
elif a[left] + a[right] < rem:
left = left + 1
else:
right = right - 1
# convert each tuple back to a list, then sort the collected quadruplets
return sorted([list(x) for x in res])
s = Solution()
print(s.fourSum([1, 0, -1, 0, -2, 2], 0)) | [
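# Expected output for the sample above:
# [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]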
"[email protected]"
] | |
99d474d6de01788f9f44e8db380fcd8057be8c85 | 2e996d6870424205bc6af7dabe8685be9b7f1e56 | /code/processing/20190325_r3_O3_IND_titration_flow/file_rename.py | 51d27275b95739132c62e7ef1b063c6806355426 | [
"CC-BY-4.0",
"MIT"
] | permissive | minghao2016/mwc_mutants | fd705d44e57e3b2370d15467f31af0ee3945dcc2 | 0f89b3920c6f7a8956f48874615fd1977891e33c | refs/heads/master | 2023-03-25T03:56:33.199379 | 2020-06-26T20:09:00 | 2020-06-26T20:09:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | # -*- coding: utf-8 -*-
import numpy as np
import fcsparser
import os
import glob
# Define the details fo the expriment.
USERNAME = 'gchure'
DATE = 20190325
RUN_NO = 3
FCS_PATTERN = 'RP2019-03-25'
savedir = '../../../data/flow/csv/'
# Define the order of rows and the cols.
R = (0, 0, 260, 260, 260, 260)
ROWS = ('auto', 'delta', 'F164T', 'Q294V', 'Q294K', 'Q294R')
OPS = ('NA', 'O3', 'O3', 'O3', 'O3', 'O3')
COLS = (0, 0.1, 5, 10, 25, 50, 75, 100, 250, 500, 1000, 5000)
# Get the names of the files
files = glob.glob('../../../data/flow/fcs/{0}*r{1}*.fcs'.format(FCS_PATTERN, RUN_NO))
files = np.sort(files)
# Break the list up into columns.
ncols, nrows = len(COLS), len(ROWS)
col_groups = [files[i:i + nrows] for i in range(0, len(files), nrows)]
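# Assuming the sorted .fcs filenames run plate-column-major, col_groups[i][j]
# is the file for strain ROWS[j] at IPTG concentration COLS[i].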
for i, col in enumerate(col_groups):
for j, samp in enumerate(col):
# Define the new name.
name = '{0}_r{1}_{2}_R{3}_{4}_{5}uMIPTG'.format(
DATE, RUN_NO, OPS[j], R[j], ROWS[j], COLS[i])
# Load the file using fcsparser and save to csv.
_, data = fcsparser.parse(samp)
data.to_csv('{0}{1}.csv'.format(savedir, name))
# Rename the fcs file.
os.rename(samp, '../../../data/flow/fcs/{0}.fcs'.format(name)) | [
"[email protected]"
] | |
79a91e47db28a01386fb815a32b47a218c215852 | f3b233e5053e28fa95c549017bd75a30456eb50c | /tyk2_input/31/31-46_MD_NVT_rerun/set_7.py | 83bf1b35f537aee7c2dd8f6127d9919cfeab9ce4 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
dir = '/mnt/scratch/songlin3/run/tyk2/L31/MD_NVT_rerun/ti_one-step/31_46/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_7.in'
temp_pbs = filesdir + 'temp_7.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_7.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_7.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
b24b3b508692c9d3bbffa96ff99acdc158a53fa4 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007.1/desktop/kde/base/kdesdk/actions.py | ab36d22af81fe87c89ebff98566184311e00fa96 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import kde
def setup():
autotools.make("-f admin/Makefile.common")
kde.configure("--with-subversion \
--with-berkeley-db \
--with-db-name=db-4.2 \
--with-db-include-dir=/usr/include/db4.2")
def build():
kde.make()
def install():
kde.install()
| [
"[email protected]"
] | |
db0afec86c62701b4b6b347de2fe3cb745f7d55f | ef32b87973a8dc08ba46bf03c5601548675de649 | /pytglib/api/functions/get_chat_sponsored_message.py | 71868b22788dde705d4134cc9c51f27345d2e10d | [
"MIT"
] | permissive | iTeam-co/pytglib | 1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721 | d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5 | refs/heads/master | 2022-07-26T09:17:08.622398 | 2022-07-14T11:24:22 | 2022-07-14T11:24:22 | 178,060,880 | 10 | 9 | null | null | null | null | UTF-8 | Python | false | false | 776 | py |
from ..utils import Object
class GetChatSponsoredMessage(Object):
"""
Returns sponsored message to be shown in a chat; for channel chats only. Returns a 404 error if there is no sponsored message in the chat
Attributes:
ID (:obj:`str`): ``GetChatSponsoredMessage``
Args:
chat_id (:obj:`int`):
Identifier of the chat
Returns:
SponsoredMessage
Raises:
:class:`telegram.Error`
"""
ID = "getChatSponsoredMessage"
def __init__(self, chat_id, extra=None, **kwargs):
self.extra = extra
self.chat_id = chat_id # int
@staticmethod
def read(q: dict, *args) -> "GetChatSponsoredMessage":
chat_id = q.get('chat_id')
return GetChatSponsoredMessage(chat_id)
| [
"[email protected]"
] | |
94441011a2b628e6ade319ba6fe05aa2e33398eb | e70e8f9f5c1b20fe36feab42ad4c2c34fc094069 | /Python/Programming Basics/Simple Calculations/02. Inches to Centimeters.py | 955473f8a25126df1ba1825d71bb4153c24c4017 | [
"MIT"
] | permissive | teodoramilcheva/softuni-software-engineering | 9247ca2032915d8614017a3762d3752b3e300f37 | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | refs/heads/main | 2023-03-29T15:55:54.451641 | 2021-04-09T18:46:32 | 2021-04-09T18:46:32 | 333,551,625 | 0 | 0 | null | 2021-04-09T18:46:32 | 2021-01-27T20:30:18 | Python | UTF-8 | Python | false | false | 80 | py | inches = float(input('Inches = '))
print('Centimeters = ' + str(inches * 2.54))
| [
"[email protected]"
] | |
fc77eaf0993fe68fe4b3692b3b0971b77c561865 | 8bb6fad924eae0aa03e36e70816ab9659131c190 | /test/account_test.py | 47ce554ce9c49f948983a15223a1f0369c55b25b | [
"MIT"
] | permissive | birkin/illiad3_client | 98c6f2200a24b140dc1a489692a16d552554d402 | d9dc3a1dbdc9b4c3181111eedc02867ab0d59088 | refs/heads/master | 2020-12-03T04:01:20.922533 | 2018-07-13T13:06:20 | 2018-07-13T13:06:20 | 95,804,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,263 | py | import os, sys, pprint, unittest
## add project parent-directory to sys.path
parent_working_dir = os.path.abspath( os.path.join(os.getcwd(), os.pardir) )
sys.path.append( parent_working_dir )
from illiad3_client.illiad3.account import IlliadSession
class AccountTest(unittest.TestCase):
def setUp(self):
self.ILLIAD_REMOTE_AUTH_URL = os.environ['ILLIAD_MODULE__TEST_REMOTE_AUTH_URL']
self.ILLIAD_REMOTE_AUTH_KEY = os.environ['ILLIAD_MODULE__TEST_REMOTE_AUTH_KEY']
self.ILLIAD_USERNAME = os.environ['ILLIAD_MODULE__TEST_USERNAME']
self.ill = IlliadSession(
self.ILLIAD_REMOTE_AUTH_URL, self.ILLIAD_REMOTE_AUTH_KEY, self.ILLIAD_USERNAME )
def tearDown(self):
self.ill.logout()
def test_login(self):
login_resp_dct = self.ill.login()
self.assertTrue( 'session_id' in login_resp_dct.keys() )
self.assertTrue( 'authenticated' in login_resp_dct.keys() )
self.assertTrue( 'registered' in login_resp_dct.keys() )
self.assertTrue( login_resp_dct['authenticated'] )
## submit_key tests ##
def test_submit_key(self):
""" Tests submit_key on article openurl. """
ill = self.ill
ill.login()
#Url encoded
openurl = "rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.spage=538&rft.issue=5&rft.date=2010-02-11&rft.volume=16&url_ver=Z39.88-2004&rft.atitle=Targeting+%CE%B17+Nicotinic+Acetylcholine+Receptors+in+the+Treatment+of+Schizophrenia.&rft.jtitle=Current+pharmaceutical+design&rft.issn=1381-6128&rft.genre=article"
submit_key = ill.get_request_key(openurl)
self.assertEqual(submit_key['ILLiadForm'],
'ArticleRequest')
self.assertEqual(submit_key['PhotoJournalTitle'],
'Current pharmaceutical design')
def test_book(self):
""" Tests submit_key on simple book openurl (includes a note). """
ill = self.ill
ill.login()
openurl = "sid=FirstSearch:WorldCat&genre=book&isbn=9780231122375&title=Mahatma%20Gandhi%20%3A%20nonviolent%20power%20in%20action&date=2000&rft.genre=book¬es=%E2%80%9Ci%C3%B1t%C3%ABrn%C3%A2ti%C3%B8n%C3%A0l%C4%ADz%C3%A6ti%D0%A4n%E2%80%9D"
submit_key = ill.get_request_key(openurl)
self.assertEqual( 'LoanRequest', submit_key['ILLiadForm'] )
self.assertEqual( 'Mahatma Gandhi : nonviolent power in action', submit_key['LoanTitle'] )
self.assertEqual( 'LoanRequest', submit_key['ILLiadForm'] )
self.assertEqual( '“iñtërnâtiønàlĭzætiФn”', submit_key['Notes'] )
self.assertEqual(
['CitedIn', 'ILLiadForm', 'ISSN', 'LoanDate', 'LoanTitle', 'NotWantedAfter', 'Notes', 'SearchType', 'SessionID', 'SubmitButton', 'Username', 'blocked', 'errors'],
sorted(submit_key.keys()) )
def test_book_with_long_openurl(self):
""" Tests submit_key on long book openurl. """
ill = self.ill
ill.login()
openurl = 'sid=FirstSearch%3AWorldCat&genre=book&isbn=9784883195732&title=Shin+kanzen+masuta%CC%84.+Nihongo+no%CC%84ryoku+shiken&date=2011&aulast=Fukuoka&aufirst=Rieko&id=doi%3A&pid=858811926%3Cfssessid%3E0%3C%2Ffssessid%3E%3Cedition%3EShohan.%3C%2Fedition%3E&url_ver=Z39.88-2004&rfr_id=info%3Asid%2Ffirstsearch.oclc.org%3AWorldCat&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&req_dat=%3Csessionid%3E0%3C%2Fsessionid%3E&rfe_dat=%3Caccessionnumber%3E858811926%3C%2Faccessionnumber%3E&rft_id=info%3Aoclcnum%2F858811926&rft_id=urn%3AISBN%3A9784883195732&rft.aulast=Fukuoka&rft.aufirst=Rieko&rft.btitle=Shin+kanzen+masuta%CC%84.+Nihongo+no%CC%84ryoku+shiken&rft.date=2011&rft.isbn=9784883195732&rft.place=To%CC%84kyo%CC%84&rft.pub=Suri%CC%84e%CC%84+Nettowa%CC%84ku&rft.edition=Shohan.&rft.genre=book'
submit_key = ill.get_request_key( openurl )
self.assertEqual(
'LoanRequest', submit_key['ILLiadForm'] )
self.assertEqual(
['CitedIn', 'ESPNumber', 'ILLiadForm', 'ISSN', 'LoanAuthor', 'LoanDate', 'LoanEdition', 'LoanPlace', 'LoanPublisher', 'LoanTitle', 'NotWantedAfter', 'SearchType', 'SessionID', 'SubmitButton', 'Username', 'blocked', 'errors'],
sorted(submit_key.keys()) )
def test_bookitem(self):
""" Tests submit_key on genre=bookitem openurl. """
ill = self.ill
ill.login()
openurl = 'url_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.btitle=Current%20Protocols%20in%20Immunology&rft.atitle=Isolation%20and%20Functional%20Analysis%20of%20Neutrophils&rft.date=2001-05-01&rft.isbn=9780471142737&rfr_id=info%3Asid%2Fwiley.com%3AOnlineLibrary'
submit_key = ill.get_request_key( openurl )
self.assertEqual(
'BookChapterRequest', submit_key['ILLiadForm'] )
self.assertEqual(
['CitedIn', 'ILLiadForm', 'ISSN', 'NotWantedAfter', 'PhotoArticleTitle', 'PhotoJournalInclusivePages', 'PhotoJournalTitle', 'PhotoJournalYear', 'SearchType', 'SessionID', 'SubmitButton', 'Username', 'blocked', 'errors'],
sorted(submit_key.keys()) )
def test_tiny_openurl(self):
""" Tests submit_key on painfully minimalist openurl. """
ill = self.ill
ill.login()
openurl = 'sid=Entrez:PubMed&id=pmid:23671965'
submit_key = ill.get_request_key( openurl )
self.assertEqual(
'LoanRequest', submit_key['ILLiadForm'] )
self.assertEqual(
['CitedIn', 'ILLiadForm', 'LoanDate', 'LoanTitle', 'NotWantedAfter', 'Notes', 'SearchType', 'SessionID', 'SubmitButton', 'Username', 'blocked', 'errors'],
sorted(submit_key.keys()) )
self.assertEqual(
'entire openurl: `sid=Entrez:PubMed&id=pmid:23671965`', submit_key['Notes'] )
def test_logout(self):
""" Tests logout. """
response_dct = self.ill.logout()
self.assertTrue( 'authenticated' in response_dct.keys() )
self.assertFalse(response_dct['authenticated'])
def suite():
suite = unittest.makeSuite(AccountTest, 'test')
return suite
if __name__ == '__main__':
unittest.main()
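# To run this file directly: `python account_test.py`; the ILLIAD_MODULE__TEST_*
# environment variables read in setUp must be set, and the tests talk to a live
# ILLiad instance, so expect network traffic.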
| [
"[email protected]"
] | |
039a3452010ce342a27554c18b0625ee81a2779a | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/network/v20171001/express_route_circuit_authorization.py | bedae3d9a79d6a3dc6430718bb78d7840978bec3 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,604 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['ExpressRouteCircuitAuthorization']
class ExpressRouteCircuitAuthorization(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
authorization_name: Optional[pulumi.Input[str]] = None,
authorization_use_status: Optional[pulumi.Input[str]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Authorization in an ExpressRouteCircuit resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_key: The authorization key.
:param pulumi.Input[str] authorization_name: The name of the authorization.
:param pulumi.Input[str] authorization_use_status: AuthorizationUseStatus. Possible values are: 'Available' and 'InUse'.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['authorization_key'] = authorization_key
if authorization_name is None:
raise TypeError("Missing required property 'authorization_name'")
__props__['authorization_name'] = authorization_name
__props__['authorization_use_status'] = authorization_use_status
if circuit_name is None:
raise TypeError("Missing required property 'circuit_name'")
__props__['circuit_name'] = circuit_name
__props__['id'] = id
__props__['name'] = name
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['etag'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitAuthorization")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitAuthorization, __self__).__init__(
'azure-nextgen:network/v20171001:ExpressRouteCircuitAuthorization',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitAuthorization':
"""
Get an existing ExpressRouteCircuitAuthorization resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ExpressRouteCircuitAuthorization(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> pulumi.Output[Optional[str]]:
"""
The authorization key.
"""
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="authorizationUseStatus")
def authorization_use_status(self) -> pulumi.Output[Optional[str]]:
"""
AuthorizationUseStatus. Possible values are: 'Available' and 'InUse'.
"""
return pulumi.get(self, "authorization_use_status")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
f6ebb3862bcfeae9cb815cf8f6f75caf7ece1cbf | c4a57dced2f1ed5fd5bac6de620e993a6250ca97 | /huaxin/huaxin_ui/ui_android_xjb_2_0/register_page.py | f36c00fdfd14245afe93d9b85d7c54953dbe4ae2 | [] | no_license | wanglili1703/firewill | f1b287b90afddfe4f31ec063ff0bd5802068be4f | 1996f4c01b22b9aec3ae1e243d683af626eb76b8 | refs/heads/master | 2020-05-24T07:51:12.612678 | 2019-05-17T07:38:08 | 2019-05-17T07:38:08 | 187,169,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | # coding: utf-8
from _common.page_object import PageObject
from _common.xjb_decorator import gesture_close_afterwards, user_info_close_afterwards, robot_log
from _tools.mysql_xjb_tools import MysqlXjbTools
from huaxin_ui.ui_android_xjb_2_0.binding_card_page import BindingCardPage
import huaxin_ui.ui_android_xjb_2_0.home_page
PHONE_NUMBER = "xpath_//android.widget.EditText[@text='请输入手机号码']"
GET_VERIFICATION_CODE = "xpath_//android.widget.Button[@text='获取验证码']"
VERIFICATION_CODE_INPUT = "xpath_//android.widget.EditText[@text='请输入验证码']"
PASSWORD = "xpath_//android.widget.EditText[@resource-id='com.shhxzq.xjb:id/register_pwd']"
LOGIN_PASSWORD_CONFIRM = "xpath_//android.widget.Button[@text='注册']"
BINDING_CARD = "xpath_//android.widget.Button[@text='绑定银行卡']"
SHOPPING_FIRST = "xpath_//android.widget.TextView[@text='先逛逛']"
TRADE_PASSWORD = "xpath_//android.widget.EditText[@resource-id='com.shhxzq.xjb:id/tradepwd_et']"
TRADE_PASSWORD_CONFIRM = "xpath_//android.widget.Button[@text='下一步']"
current_page = []
class RegisterPage(PageObject):
def __init__(self, web_driver):
super(RegisterPage, self).__init__(web_driver)
self.elements_exist(*current_page)
self._db = MysqlXjbTools()
@user_info_close_afterwards
@gesture_close_afterwards
def register(self, phone_number, login_password):
self.perform_actions(
PHONE_NUMBER, phone_number,
GET_VERIFICATION_CODE,
PASSWORD, login_password,
)
verification_code = MysqlXjbTools().get_sms_verify_code(mobile=phone_number, template_id='cif_register')
self.perform_actions(
VERIFICATION_CODE_INPUT, verification_code,
LOGIN_PASSWORD_CONFIRM,
SHOPPING_FIRST,
)
page = huaxin_ui.ui_android_xjb_2_0.home_page.HomePage(self.web_driver)
return page
@robot_log
def register_binding_card(self, phone_number, login_password, trade_password):
self.perform_actions(PHONE_NUMBER, phone_number,
GET_VERIFICATION_CODE,
PASSWORD, login_password)
verification_code = MysqlXjbTools().get_sms_verify_code(mobile=phone_number, template_id='cif_register')
self.perform_actions(VERIFICATION_CODE_INPUT, verification_code, )
self.perform_actions(
LOGIN_PASSWORD_CONFIRM,
BINDING_CARD,
TRADE_PASSWORD, trade_password,
TRADE_PASSWORD, trade_password,
TRADE_PASSWORD_CONFIRM,
)
page = BindingCardPage(self.web_driver)
return page
| [
"[email protected]"
] | |
9f51a684b8c7951a2e4fc7e6f2705499041116ae | 8f7a30fd1c4d70535ba253d6e442576944fdfd7c | /Topics/Magic methods/10 puppies/main.py | e444a74a24c6ddca7f787232073b25a34c423935 | [] | no_license | TogrulAga/Coffee-Machine | 9596c3d8ef1b7347d189249f20602b584d8842e3 | f065de747bd1b626e4e5a06fac68202e41b6c11e | refs/heads/master | 2023-04-11T20:54:21.710264 | 2021-05-09T23:01:48 | 2021-05-09T23:01:48 | 365,864,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | class Puppy:
n_puppies = 0 # number of created puppies
# define __new__
def __new__(cls):
if cls.n_puppies >= 10:
return None
cls.n_puppies += 1
return object.__new__(cls)
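# Quick check (illustrative driver, not part of the exercise): __new__ hands
# back instances for the first ten calls and None afterwards.
litter = [Puppy() for _ in range(12)]
print(litter.count(None))  # 2: only ten Puppy objects were actually created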
| [
"[email protected]"
] | |
5b8468dad0ffc2610646ee99a9814491cbdeb199 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/eve/client/script/ui/eveUIProcs.py | ea6ae5bc59cf6e80cb3020348a440d2d503d85e2 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 3,969 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\eveUIProcs.py
import uthread
import eve.common.script.sys.eveCfg as util
import locks
import random
import svc
import carbonui.const as uiconst
import localization
class EveUIProcSvc(svc.uiProcSvc):
__guid__ = 'svc.eveUIProcSvc'
__replaceservice__ = 'uiProcSvc'
__startupdependencies__ = ['cmd']
def Run(self, *args):
svc.uiProcSvc.Run(self, *args)
self.uiCallbackDict = {None: self._NoneKeyIsInvalid_Callback,
'OpenCharacterCustomization': self.__OpenCharacterCustomization_Callback,
'CorpRecruitment': self._CorpRecruitment_Callback,
'OpenCorporationPanel_Planets': self._OpenCorporationPanel_Planets_Callback,
'OpenAuraInteraction': self.cmd.OpenAuraInteraction,
'ExitStation': self.cmd.CmdExitStation,
'OpenFitting': self.cmd.OpenFitting,
'OpenShipHangar': self.cmd.OpenShipHangar,
'OpenCargoBay': self.cmd.OpenCargoHoldOfActiveShip,
'OpenDroneBay': self.cmd.OpenDroneBayOfActiveShip,
'OpenMarket': self.cmd.OpenMarket,
'OpenAgentFinder': self.cmd.OpenAgentFinder,
'OpenStationDoor': self.__OpenStationDoor_Callback,
'EnterHangar': self.cmd.CmdEnterHangar,
'GiveNavigationFocus': self._GiveNavigationFocus_Callback}
self.isOpeningPI = False
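# Dispatch an ActionObject UI callback key to the handler registered in
# uiCallbackDict above, running it on a uthread worker tasklet so the caller
# does not block; returns False (and logs an error) for unknown keys.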
def _PerformUICallback(self, callbackKey):
callback = self.uiCallbackDict.get(callbackKey, None)
if callback is not None:
uthread.worker('_PerformUICallback_%s' % callbackKey, self._PerformUICallbackTasklet, callbackKey, callback)
return True
self.LogError('ActionObject.PerformUICallback: Unknown callbackKey', callbackKey)
return False
def _PerformUICallbackTasklet(self, callbackKey, callback):
try:
callback()
except TypeError as e:
self.LogError('ActionObject.PerformUICallback: callbackKey "%s" is associated with a non-callable object: %s' % (callbackKey, callback), e)
def _NoneKeyIsInvalid_Callback(self):
self.LogError('PerformUICallback called from ActionObject without the callbackKey property (it was None)!')
def _CorpRecruitment_Callback(self):
if util.IsNPC(session.corpid):
self.cmd.OpenCorporationPanel_RecruitmentPane()
else:
self.cmd.OpenCorporationPanel()
def _GiveNavigationFocus_Callback(self):
sm.GetService('navigation').Focus()
def _OpenCorporationPanel_Planets_Callback(self):
if self.isOpeningPI:
return
self.isOpeningPI = True
try:
if sm.GetService('planetSvc').GetMyPlanets():
self.cmd.OpenPlanets()
else:
systemData = sm.GetService('map').GetSolarsystemItems(session.solarsystemid2)
systemPlanets = []
for orbitalBody in systemData:
if orbitalBody.groupID == const.groupPlanet:
systemPlanets.append(orbitalBody)
planetID = systemPlanets[random.randrange(0, len(systemPlanets))].itemID
sm.GetService('viewState').ActivateView('planet', planetID=planetID)
if not settings.user.suppress.Get('suppress.PI_Info', None):
uicore.Message('PlanetaryInteractionIntroText')
finally:
self.isOpeningPI = False
def __OpenStationDoor_Callback(self):
uicore.Message('CaptainsQuartersStationDoorClosed')
def __OpenCharacterCustomization_Callback(self):
if getattr(sm.GetService('map'), 'busy', False):
return
if uicore.Message('EnterCharacterCustomizationCQ', {}, uiconst.YESNO, uiconst.ID_YES) == uiconst.ID_YES:
self.cmd.OpenCharacterCustomization()
| [
"[email protected]"
] | |
580505ac4ba1e1a284893894570d873fee8578a5 | 3bc7db0cc5f66aff517b18f0a1463fffd7b37a6f | /generate.py | 5162c4a370a08417a9a630111ec0eec988adcd19 | [
"MIT"
] | permissive | patilvinay/docker-python-node | 6643f96fd89214c7fe54c0010890052030e60016 | fbab922c579ea0b6b12ce2183fe8d0e48cdd666a | refs/heads/master | 2021-10-08T04:05:59.094149 | 2018-12-07T15:09:01 | 2018-12-07T15:09:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,020 | py | #!/usr/bin/env python3
import itertools
import os
from copy import deepcopy
from glob import glob
from os.path import dirname
from os.path import join
from shutil import unpack_archive
from typing import List
from urllib.request import urlretrieve
import requests
import yaml
from dockerfile_compose import include_dockerfile
from packaging.version import Version
def get_repo_version(repo):
res = requests.get(f'https://api.github.com/repos/{repo}/branches/master',
headers={'Accept': 'application/vnd.github.v3+json'})
if res.status_code != 200:
raise RuntimeError(f"Can't get version for {repo}")
return res.json()['commit']['sha']
repos = {
'nodejs/docker-node': {
'version': get_repo_version('nodejs/docker-node')
},
'docker-library/python': {
'version': get_repo_version('docker-library/python')
}
}
def fetch_all_repos():
if not os.path.exists('repos'):
os.makedirs('repos')
for k, v in repos.items():
version = v['version']
url = f'https://github.com/{k}/archive/{version}.zip'
zip_name = k.split('/')[1]
zip = f'repos/{zip_name}-{version}.zip'
urlretrieve(url, zip)
unpack_archive(zip, extract_dir='repos')
def get_dockerfiles(path):
return glob(join(path, r'*/stretch/Dockerfile'))
def get_python_dockerfiles():
return get_dockerfiles('repos/python-{}'.format(repos['docker-library/python']['version']))
def get_node_dockerfiles():
return get_dockerfiles('repos/docker-node-{}'.format(repos['nodejs/docker-node']['version']))
def update_travis_yaml():
with open('.travis.yml', 'r') as travis_yaml:
travis_dict = yaml.safe_load(travis_yaml)
dockerfiles = glob('dockerfiles/*/Dockerfile')
travis_dict = travis_yaml_add_stages(travis_dict, dockerfiles)
with open('.travis.yml', 'w+') as travis_yaml:
travis_yaml.write('# generated by generate.py\n')
yaml.safe_dump(travis_dict, travis_yaml, default_flow_style=False)
def get_versions_from_dockerfile(dockerfile_path):
versions = {'node': None, 'python': None}
with open(dockerfile_path, 'r') as df:
for line in df:
if line.startswith('ENV'):
name, version = line.split()[1:]
if name == 'PYTHON_VERSION':
versions['python'] = Version(version)
if name == 'NODE_VERSION':
versions['node'] = Version(version)
return versions
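# e.g. a Dockerfile containing "ENV PYTHON_VERSION 3.7.2" and
# "ENV NODE_VERSION 11.6.0" (illustrative values) yields
# {'node': Version('11.6.0'), 'python': Version('3.7.2')}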
def make_build_stage(dockerfile_path: str, tags: List[str]) -> dict:
return {
'stage': 'Image Builds',
'name': ', '.join(tags),
'if': 'type NOT IN (cron)',
'script': [
'set -e',
'echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin',
'# run tests',
f'travis_retry docker build -t austinpray/python-node {dirname(dockerfile_path)}',
*[f'docker tag austinpray/python-node austinpray/python-node:{tag}' for tag in tags],
*[f'[ "$TRAVIS_BRANCH" = "master" ] && docker push austinpray/python-node:{tag}' for tag in tags]
]
}
def travis_yaml_add_stages(travis_dict: dict, dockerfile_paths: List[str]) -> dict:
dockerfiles = []
for dockerfile_path in dockerfile_paths:
versions = get_versions_from_dockerfile(dockerfile_path)
dockerfiles.append({
'dockerfile_path': dockerfile_path,
'python_version': versions['python'],
'node_version': versions['node']
})
dockerfiles.sort(key=lambda x: (x['python_version'], x['node_version']))
dockerfiles.reverse()
def strip_version(version, n=0):
if n == 0:
return '.'.join(str(version).split('.'))
return '.'.join(str(version).split('.')[:n])
def group_by_version(py_offset=0, node_offset=0):
group = {}
for df in deepcopy(dockerfiles):
key = ''.join([
strip_version(df['python_version'],
py_offset),
'-',
strip_version(df['node_version'],
node_offset)
])
if key not in group:
group[key] = df['dockerfile_path']
return group
options = [-2, -1, 0]
dockerfile_tags = {}
for t in itertools.product(options, options):
for tag, dockerfile in group_by_version(t[0], t[1]).items():
if dockerfile not in dockerfile_tags:
dockerfile_tags[dockerfile] = [tag]
continue
dockerfile_tags[dockerfile].append(tag)
travis_dict['jobs'] = {
'include': [
*[make_build_stage(dockerfile_path=df,
tags=tags) for df, tags in dockerfile_tags.items()]
]
}
return travis_dict
def generate_dockerfiles():
for dockerfileTuple in itertools.product(get_python_dockerfiles(), get_node_dockerfiles()):
python_version = dockerfileTuple[0].split('/')[2]
node_version = dockerfileTuple[1].split('/')[2]
tag = f'{python_version}-{node_version}'
print(tag)
tag_dir = f'dockerfiles/{tag}'
if not os.path.exists(tag_dir):
os.makedirs(tag_dir)
with open(join(tag_dir, 'Dockerfile'), 'w+') as template:
template.write('''
# This is generated by generate.py, don't edit it directly
'''.strip())
template.write('\n')
template.write('FROM buildpack-deps:stretch\n')
template.write('\n')
with open(dockerfileTuple[0], 'r') as df:
include_dockerfile(df, template)
with open(dockerfileTuple[1], 'r') as df:
include_dockerfile(df, template)
template.write('CMD ["python3"]\n')
def main():
fetch_all_repos()
generate_dockerfiles()
update_travis_yaml()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
513e63af05b9489a3168b1f4f389088edf36f4a2 | 0cf316b6a125442294acdf78fe725de42a3ce6b4 | /python/CosmiQNet.training.py | 6d6e36cca5b642da0885b772be944269f78223c1 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | GPrathap/utilities | 2a5f9ef2df9fdaa7a2ee9208aa8bbbca879be1f2 | 0624564e53a2860e66265654c23908688067798a | refs/heads/master | 2021-01-19T17:59:00.588299 | 2017-08-26T14:08:38 | 2017-08-26T14:08:38 | 101,102,401 | 0 | 0 | null | 2017-08-22T20:01:22 | 2017-08-22T20:01:22 | null | UTF-8 | Python | false | false | 4,008 | py | # The NN
# NOTE: this file is an excerpt; FLAGS, gpu, numberOfBands and the per-layer
# dicts (alpha, beta, W, b, conv, deconv, inlayer, outlayer, sr, sr_cost,
# MSE_sr, PSNR_sr, optimizer_layer, optimizer_all) are assumed to be defined
# earlier in the original script.
import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (tf.placeholder, tf.log, ...)
with tf.device(gpu):
# Input is has numberOfBands for the pre-processed image and numberOfBands for the original image
xy = tf.placeholder(tf.float32, shape=[None, FLAGS.ws, FLAGS.ws, 2*numberOfBands])
with tf.name_scope("split") as scope:
x = tf.slice(xy, [0,0,0,0], [-1,-1,-1,numberOfBands]) # low res image
y = tf.slice(xy, [0,0,0,numberOfBands], [-1,-1,-1,-1]) # high res image
with tf.name_scope("initial_costs") as scope:
# used as a measure of improvement not for optimization
cost_initial = tf.reduce_sum ( tf.pow( x-y,2))
MSE_initial = cost_initial/(FLAGS.ws*FLAGS.ws*(1.0*numberOfBands)*FLAGS.batch_size)
PSNR_initial = -10.0*tf.log(MSE_initial)/np.log(10.0)
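# tf.log is the natural logarithm, so dividing by np.log(10.0) converts it to
# log10, i.e. PSNR = -10 * log10(MSE) in dB (the same conversion is used for
# PSNR_sr below).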
for i in range(FLAGS.total_layers):
with tf.name_scope("layer"+str(i)) as scope:
# alpha and beta are perturbation-layer bypass parameters that determine a convex combination of an input layer and an output layer
alpha[i] = tf.Variable(0.1, name='alpha_'+str(i))
beta[i] = tf.maximum( FLAGS.min_alpha , tf.minimum ( 1.0 , alpha[i] ), name='beta_'+str(i))
if (0 == i) :
inlayer[i] = x
else :
inlayer[i] = outlayer[i-1]
# we build a list of variables to optimize per layer
vars_layer = [alpha[i]]
# Convolutional layers
W[i][0] = tf.Variable(tf.truncated_normal([FLAGS.filter_size,FLAGS.filter_size,numberOfBands,FLAGS.filters], stddev=0.1), name='W'+str(i)+'.'+str(0))
b[i][0] = tf.Variable(tf.constant(0.0,shape=[FLAGS.filters]), name='b'+str(i)+'.'+str(0))
conv[i][0] = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d( inlayer[i], W[i][0], strides=[1,1,1,1], padding='SAME'), b[i][0], name='conv'+str(i)+'.'+str(0)))
for j in range(1,FLAGS.convolutions_per_layer):
W[i][j] = tf.Variable(tf.truncated_normal([FLAGS.filter_size,FLAGS.filter_size,FLAGS.filters,FLAGS.filters], stddev=0.1), name='W'+str(i)+'.'+str(j))
b[i][j] = tf.Variable(tf.constant(0.0,shape=[FLAGS.filters]), name='b'+str(i)+'.'+str(j))
vars_layer = vars_layer + [W[i][j],b[i][j]]
conv[i][j] = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d( conv[i][j-1], W[i][j], strides=[1,1,1,1], padding='SAME'), b[i][j], name='conv'+str(i)+'.'+str(j)))
# Deconvolutional layer
Wo[i] = tf.Variable(tf.truncated_normal([FLAGS.filter_size,FLAGS.filter_size,numberOfBands,FLAGS.filters], stddev=0.1), name='Wo'+str(i))
bo[i] = tf.Variable(tf.constant(0.0,shape=[FLAGS.filters]), name='bo'+str(i))
deconv[i] = tf.nn.relu(
tf.nn.conv2d_transpose(
tf.nn.bias_add( conv[i][FLAGS.convolutions_per_layer-1], bo[i]), Wo[i], [FLAGS.batch_size,FLAGS.ws,FLAGS.ws,numberOfBands] ,strides=[1,1,1,1], padding='SAME'))
vars_layer = vars_layer + [Wo[i],bo[i]]
# Convex combination of input and output layer
outlayer[i] = tf.nn.relu( tf.add( tf.scalar_mul( beta[i] , deconv[i]), tf.scalar_mul(1.0-beta[i], inlayer[i])))
# sr is the super-resolution process. It really only has enhancement meaning during the current layer of training.
sr[i] = tf.slice(outlayer[i],[0,0,0,0],[-1,-1,-1,numberOfBands])
# The cost funtion to optimize. This is not PSNR but monotonically related
sr_cost[i] = tf.reduce_sum ( tf.pow( sr[i]-y,2))
MSE_sr[i] = sr_cost[i]/(FLAGS.ws*FLAGS.ws*numberOfBands*1.0*FLAGS.batch_size)
PSNR_sr[i] = -10.0*tf.log(MSE_sr[i])/np.log(10.0)
# ADAM optimizers seem to work well
optimizer_layer[i] = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate).minimize(sr_cost[i], var_list=vars_layer)
optimizer_all[i] = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate).minimize(sr_cost[i]) | [
"[email protected]"
] | |
eab79d50f246b41e7ca2d6791bef6ec5ac89c03c | ea4e3ac0966fe7b69f42eaa5a32980caa2248957 | /download/unzip/pyobjc/pyobjc-14/pyobjc/stable/PyOpenGL-2.0.2.01/OpenGL/Demo/NeHe/lesson3.py | 499a6e4689f5adda4626afec603848f84836b3c1 | [] | no_license | hyl946/opensource_apple | 36b49deda8b2f241437ed45113d624ad45aa6d5f | e0f41fa0d9d535d57bfe56a264b4b27b8f93d86a | refs/heads/master | 2023-02-26T16:27:25.343636 | 2020-03-29T08:50:45 | 2020-03-29T08:50:45 | 249,169,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,888 | py | #!
# This is statement is required by the build system to query build info
if __name__ == '__build__':
raise Exception
import string
__version__ = string.split('$Revision: 1.8 $')[1]
__date__ = string.join(string.split('$Date: 2002/12/31 04:13:55 $')[1:3], ' ')
__author__ = 'Tarn Weisner Burton <[email protected]>'
#
# Ported to PyOpenGL 2.0 by Tarn Weisner Burton 10May2001
#
# This code was created by Richard Campbell '99 (ported to Python/PyOpenGL by John Ferguson 2000)
#
# The port was based on the PyOpenGL tutorial module: dots.py
#
# If you've found this code useful, please let me know (email John Ferguson at [email protected]).
#
# See original source and C based tutorial at http://nehe.gamedev.net
#
# Note:
# -----
# This code is not a good example of Python and using OO techniques. It is a simple and direct
# exposition of how to use the Open GL API in Python via the PyOpenGL package. It also uses GLUT,
# which in my opinion is a high quality library in that it makes my work simpler. Due to using
# these APIs, this code is more like a C program using function based programming (which Python
# is in fact based upon, note the use of closures and lambda) than a "good" OO program.
#
# To run this code get and install OpenGL, GLUT, PyOpenGL (see http://www.python.org), and PyNumeric.
# Installing PyNumeric means having a C compiler that is configured properly, or so I found. For
# Win32 this assumes VC++, I poked through the setup.py for Numeric, and chased through disutils code
# and noticed what seemed to be hard coded preferences for VC++ in the case of a Win32 OS. However,
# I am new to Python and know little about disutils, so I may just be not using it right.
#
# BTW, since this is Python make sure you use tabs or spaces to indent, I had numerous problems since I
# was using editors that were not sensitive to Python.
#
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import sys
# Some api in the chain is translating the keystrokes to this octal string
# so instead of saying: ESCAPE = 27, we use the following.
ESCAPE = '\033'
# Number of the glut window.
window = 0
# A general OpenGL initialization function. Sets all of the initial parameters.
def InitGL(Width, Height): # We call this right after our OpenGL window is created.
glClearColor(0.0, 0.0, 0.0, 0.0) # This Will Clear The Background Color To Black
glClearDepth(1.0) # Enables Clearing Of The Depth Buffer
glDepthFunc(GL_LESS) # The Type Of Depth Test To Do
glEnable(GL_DEPTH_TEST) # Enables Depth Testing
glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading
glMatrixMode(GL_PROJECTION)
glLoadIdentity() # Reset The Projection Matrix
# Calculate The Aspect Ratio Of The Window
gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def ReSizeGLScene(Width, Height):
if Height == 0: # Prevent A Divide By Zero If The Window Is Too Small
Height = 1
glViewport(0, 0, Width, Height) # Reset The Current Viewport And Perspective Transformation
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
# The main drawing function.
def DrawGLScene():
# Clear The Screen And The Depth Buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity() # Reset The View
# Move Left 1.5 units and into the screen 6.0 units.
glTranslatef(-1.5, 0.0, -6.0)
# Since we have smooth color mode on, this will be great for the Phish Heads :-).
# Draw a triangle
glBegin(GL_POLYGON) # Start drawing a polygon
glColor3f(1.0, 0.0, 0.0) # Red
glVertex3f(0.0, 1.0, 0.0) # Top
glColor3f(0.0, 1.0, 0.0) # Green
glVertex3f(1.0, -1.0, 0.0) # Bottom Right
glColor3f(0.0, 0.0, 1.0) # Blue
glVertex3f(-1.0, -1.0, 0.0) # Bottom Left
glEnd() # We are done with the polygon
# Move Right 3.0 units.
glTranslatef(3.0, 0.0, 0.0)
# Draw a square (quadrilateral)
glColor3f(0.3, 0.5, 1.0) # Bluish shade
glBegin(GL_QUADS) # Start drawing a 4 sided polygon
glVertex3f(-1.0, 1.0, 0.0) # Top Left
glVertex3f(1.0, 1.0, 0.0) # Top Right
glVertex3f(1.0, -1.0, 0.0) # Bottom Right
glVertex3f(-1.0, -1.0, 0.0) # Bottom Left
glEnd() # We are done with the polygon
# since this is double buffered, swap the buffers to display what just got drawn.
glutSwapBuffers()
# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
# If escape is pressed, kill everything.
if args[0] == ESCAPE:
sys.exit()
def main():
global window
    # Pass the command-line arguments through to glutInit.
glutInit(sys.argv)
# Select type of Display mode:
# Double buffer
# RGBA color
# Alpha components supported
# Depth buffer
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
# get a 640 x 480 window
glutInitWindowSize(640, 480)
# the window starts at the upper left corner of the screen
glutInitWindowPosition(0, 0)
# Okay, like the C version we retain the window id to use when closing, but for those of you new
# to Python (like myself), remember this assignment would make the variable local and not global
# if it weren't for the global declaration at the start of main.
window = glutCreateWindow("Jeff Molofee's GL Code Tutorial ... NeHe '99")
# Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
# set the function pointer and invoke a function to actually register the callback, otherwise it
# would be very much like the C version of the code.
glutDisplayFunc(DrawGLScene)
# Uncomment this line to get full screen.
#glutFullScreen()
# When we are doing nothing, redraw the scene.
glutIdleFunc(DrawGLScene)
# Register the function called when our window is resized.
glutReshapeFunc(ReSizeGLScene)
# Register the function called when the keyboard is pressed.
glutKeyboardFunc(keyPressed)
# Initialize our window.
InitGL(640, 480)
# Start Event Processing Engine
glutMainLoop()
# Print message to console, and kick off the main to get it rolling.
print "Hit ESC key to quit."
main()
| [
"[email protected]"
] | |
3257118e28b9313b80431811480ac0d8a136bdf6 | dd6c23aa9e514b77c3902075ea54e8b754fd3bce | /docs/source/conf.py | e32250b11378e8936ab862fdc86707876239259d | [
"MIT"
] | permissive | gvx/wurm | 78b71880ff9acbd503281fbe61d77063bac59643 | c6702aee03785713035ed75632b3898f4fee1664 | refs/heads/master | 2023-05-02T06:14:37.251061 | 2021-05-26T15:34:09 | 2021-05-26T15:34:09 | 328,152,422 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,989 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import pathlib
import sys
sys.path.insert(0, str(pathlib.Path(__file__).parent.parent.parent))
# -- Project information -----------------------------------------------------
project = 'wurm'
copyright = '2021, Jasmijn Wellner'
author = 'Jasmijn Wellner'
# The full version, including alpha/beta/rc tags
from wurm import __version__
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| [
"[email protected]"
] | |
dc2c585ae7d7fca0beee6bf3a1ad69b954519988 | 1577e1cf4e89584a125cffb855ca50a9654c6d55 | /pyobjc/pyobjc/pyobjc-framework-Quartz-2.5.1/Examples/TLayer/TLayerDemo.py | d71e50b3335c923a766abb8f7e771799cc0a1a04 | [
"MIT"
] | permissive | apple-open-source/macos | a4188b5c2ef113d90281d03cd1b14e5ee52ebffb | 2d2b15f13487673de33297e49f00ef94af743a9a | refs/heads/master | 2023-08-01T11:03:26.870408 | 2023-03-27T00:00:00 | 2023-03-27T00:00:00 | 180,595,052 | 124 | 24 | null | 2022-12-27T14:54:09 | 2019-04-10T14:06:23 | null | UTF-8 | Python | false | false | 1,877 | py | from Cocoa import *
from PyObjCTools import NibClassBuilder
from Quartz import *
import objc
import ShadowOffsetView
class TLayerDemo (NSObject):
colorWell = objc.IBOutlet()
shadowOffsetView = objc.IBOutlet()
shadowRadiusSlider = objc.IBOutlet()
tlayerView = objc.IBOutlet()
transparencyLayerButton = objc.IBOutlet()
@classmethod
def initialize(self):
NSColorPanel.sharedColorPanel().setShowsAlpha_(True)
def init(self):
self = super(TLayerDemo, self).init()
if self is None:
return None
if not NSBundle.loadNibNamed_owner_("TLayerDemo", self):
NSLog("Failed to load TLayerDemo.nib")
            return None
self.shadowOffsetView.setScale_(40)
self.shadowOffsetView.setOffset_(CGSizeMake(-30, -30))
self.tlayerView.setShadowOffset_(CGSizeMake(-30, -30))
self.shadowRadiusChanged_(self.shadowRadiusSlider)
# Better to do this as a subclass of NSControl....
NSNotificationCenter.defaultCenter(
).addObserver_selector_name_object_(
self, 'shadowOffsetChanged:',
ShadowOffsetView.ShadowOffsetChanged, None)
return self
def dealloc(self):
NSNotificationCenter.defaultCenter().removeObserver_(self)
super(TLayerDemo, self).dealloc()
def window(self):
return self.tlayerView.window()
@objc.IBAction
def shadowRadiusChanged_(self, sender):
self.tlayerView.setShadowRadius_(self.shadowRadiusSlider.floatValue())
@objc.IBAction
def toggleTransparencyLayers_(self, sender):
self.tlayerView.setUsesTransparencyLayers_(self.transparencyLayerButton.state())
def shadowOffsetChanged_(self, notification):
offset = notification.object().offset()
self.tlayerView.setShadowOffset_(offset)
| [
"[email protected]"
] | |
b1ce9c9f3c6a4da4e41e158cd3872a64af2f9ff2 | 6671be3a542925342379d5f6fc691acfebbe281f | /discounts/src/app.py | 496dec244427273c6b9407c558f1a2a838d82d7d | [
"Apache-2.0"
] | permissive | dalmarcogd/mobstore | e79b479b39474873043345b70f7e972f304c1586 | 0b542b9267771a1f4522990d592028dc30ee246f | refs/heads/main | 2023-04-29T22:27:20.344929 | 2021-05-18T12:00:00 | 2021-05-18T12:00:00 | 365,539,054 | 0 | 0 | Apache-2.0 | 2021-05-17T23:22:58 | 2021-05-08T14:46:34 | Go | UTF-8 | Python | false | false | 880 | py | from concurrent import futures
import grpc
from src import settings
from src.consumer import sqs
from src.discountsgrpc import discounts_pb2_grpc
from src.handlers.disounts import Discounts
from src.handlers.products import handle_products_events
from src.handlers.users import handle_users_events
class Server:
@staticmethod
def run():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
discounts_pb2_grpc.add_DiscountsServicer_to_server(Discounts(), server)
server.add_insecure_port('[::]:50051')
server.start()
server.wait_for_termination()
class Consumer:
@staticmethod
def run():
ex = futures.ThreadPoolExecutor(max_workers=2)
ex.submit(sqs.start_pool, settings.PRODUCTS_EVENTS, handle_products_events)
ex.submit(sqs.start_pool, settings.USERS_EVENTS, handle_users_events)
| [
"[email protected]"
] | |
906f84f14666538c126c47c04b7f2193cb3ebbe9 | aa2157e595b89c3512857e41fee16e8b11d7a657 | /Fresher Lavel Logical Programms/self pratice cording.py | 3516f73c6f1c0c6da8089aed6e2689850f2ee33b | [] | no_license | biswaranjanroul/Python-Logical-Programms | efee6276eea3eafab9ee6b6e7e0910b715a504d1 | 152dcecf2ecae7891a11769f250a4dc8d9d6b15f | refs/heads/master | 2022-12-15T07:37:45.978218 | 2020-09-17T13:24:53 | 2020-09-17T13:24:53 | 296,326,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | List=[True,50,10]
List.insert(2,5)
print(List,"Sum is:",sum(List))
| [
"[email protected]"
] | |
61b8e12ef142755e0f21788aadb9c6115e531a51 | 9abc2f4fbf1b31b5a56507437b4a8d9c3f3db7e6 | /newsletter/migrations/0001_initial.py | 7ec8cdad3f338cedbfa3b2dd1bbe2848327e86e9 | [] | no_license | odbalogun/ticketr | e9fe8461d66dabe395f0e1af8fbecc67dbb16e97 | 94f24c82f407f861f1614a151feb3fdd62b283e5 | refs/heads/master | 2022-11-30T22:40:30.931160 | 2019-08-09T14:34:38 | 2019-08-09T14:34:38 | 188,833,600 | 0 | 0 | null | 2022-11-22T03:50:30 | 2019-05-27T11:50:07 | Python | UTF-8 | Python | false | false | 742 | py | # Generated by Django 2.2.1 on 2019-06-09 23:51
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Subscribers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
('first_name', models.CharField(max_length=100, null=True, verbose_name='first name')),
('last_name', models.CharField(max_length=100, null=True, verbose_name='last name')),
],
),
]
| [
"[email protected]"
] | |
c48d1ed17bcbb58954275bb553132df81fc90245 | 6b6e20004b46165595f35b5789e7426d5289ea48 | /endpoints/csrf.py | 11c225924f6a0baa17a9604c9e0d567a54eb5a0a | [
"Apache-2.0"
] | permissive | anwarchk/quay | 2a83d0ab65aff6a1120fbf3a45dd72f42211633b | 23c5120790c619174e7d36784ca5aab7f4eece5c | refs/heads/master | 2020-09-12T18:53:21.093606 | 2019-11-15T19:29:02 | 2019-11-15T19:29:02 | 222,517,145 | 0 | 0 | Apache-2.0 | 2019-11-18T18:32:35 | 2019-11-18T18:32:35 | null | UTF-8 | Python | false | false | 2,375 | py | import logging
import os
import base64
import hmac
from functools import wraps
from flask import session, request, Response
import features
from app import app
from auth.auth_context import get_validated_oauth_token
from util.http import abort
logger = logging.getLogger(__name__)
OAUTH_CSRF_TOKEN_NAME = '_oauth_csrf_token'
_QUAY_CSRF_TOKEN_NAME = '_csrf_token'
_QUAY_CSRF_HEADER_NAME = 'X-CSRF-Token'
QUAY_CSRF_UPDATED_HEADER_NAME = 'X-Next-CSRF-Token'
def generate_csrf_token(session_token_name=_QUAY_CSRF_TOKEN_NAME, force=False):
""" If not present in the session, generates a new CSRF token with the given name
and places it into the session. Returns the generated token.
"""
if session_token_name not in session or force:
session[session_token_name] = base64.b64encode(os.urandom(48))
return session[session_token_name]
def verify_csrf(session_token_name=_QUAY_CSRF_TOKEN_NAME,
request_token_name=_QUAY_CSRF_TOKEN_NAME,
check_header=True):
""" Verifies that the CSRF token with the given name is found in the session and
that the matching token is found in the request args or values.
"""
token = str(session.get(session_token_name, ''))
found_token = str(request.values.get(request_token_name, ''))
if check_header and not found_token:
found_token = str(request.headers.get(_QUAY_CSRF_HEADER_NAME, ''))
if not token or not found_token or not hmac.compare_digest(token, found_token):
msg = 'CSRF Failure. Session token (%s) was %s and request token (%s) was %s'
logger.error(msg, session_token_name, token, request_token_name, found_token)
abort(403, message='CSRF token was invalid or missing.')
def csrf_protect(session_token_name=_QUAY_CSRF_TOKEN_NAME,
request_token_name=_QUAY_CSRF_TOKEN_NAME,
all_methods=False,
check_header=True):
def inner(func):
@wraps(func)
def wrapper(*args, **kwargs):
# Verify the CSRF token.
if get_validated_oauth_token() is None:
if all_methods or (request.method != "GET" and request.method != "HEAD"):
verify_csrf(session_token_name, request_token_name, check_header)
# Invoke the handler.
resp = func(*args, **kwargs)
return resp
return wrapper
return inner
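# Usage sketch (hypothetical handler, shown for illustration; real routes
# apply this decorator in their own endpoint modules):
#
#   @app.route('/api/v1/settings', methods=['POST'])
#   @csrf_protect()
#   def update_settings():
#       ...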
app.jinja_env.globals['csrf_token'] = generate_csrf_token
| [
"[email protected]"
] | |
7c82324df8e0c124b32fe046b39e3485192ab117 | afcb260d6f0c1d88232d2e300d26d8fb71b5ef43 | /django-app/config/urls.py | 34c68213f81c1a11280acec317c46cb45ec32129 | [] | no_license | JeongEuiJin/deploy-eb-docker | e5d10f65166ca8a1a4a5fdd32c9647c0d8f5feed | 1f5b57aa5e119f68c169f059e9bf88d5fbf76850 | refs/heads/master | 2020-12-02T17:46:19.905183 | 2017-07-13T07:32:36 | 2017-07-13T07:32:36 | 96,424,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^post/',include('post.urls')),
url(r'^member/',include('member.urls')),
]
# Serve files from the STATIC_ROOT path
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Serve files from the MEDIA_ROOT path
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
82d795efd4da1007bea5644cb68b779be1ba7674 | 865bd0c84d06b53a39943dd6d71857e9cfc6d385 | /126-word-ladder-ii/word-ladder-ii.py | 3d138f153124ee6bf15e58335c36caca5c1977cc | [] | no_license | ANDYsGUITAR/leetcode | 1fd107946f4df50cadb9bd7189b9f7b7128dc9f1 | cbca35396738f1fb750f58424b00b9f10232e574 | refs/heads/master | 2020-04-01T18:24:01.072127 | 2019-04-04T08:38:44 | 2019-04-04T08:38:44 | 153,473,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,174 | py | # Given two words (beginWord and endWord), and a dictionary's word list, find all shortest transformation sequence(s) from beginWord to endWord, such that:
#
#
# Only one letter can be changed at a time
# Each transformed word must exist in the word list. Note that beginWord is not a transformed word.
#
#
# Note:
#
#
# Return an empty list if there is no such transformation sequence.
# All words have the same length.
# All words contain only lowercase alphabetic characters.
# You may assume no duplicates in the word list.
# You may assume beginWord and endWord are non-empty and are not the same.
#
#
# Example 1:
#
#
# Input:
# beginWord = "hit",
# endWord = "cog",
# wordList = ["hot","dot","dog","lot","log","cog"]
#
# Output:
# [
# ["hit","hot","dot","dog","cog"],
# ["hit","hot","lot","log","cog"]
# ]
#
#
# Example 2:
#
#
# Input:
# beginWord = "hit"
# endWord = "cog"
# wordList = ["hot","dot","dog","lot","log"]
#
# Output: []
#
# Explanation: The endWord "cog" is not in wordList, therefore no possible transformation.
#
#
#
#
#
import collections
from typing import List

class Solution:
def __init__(self):
self.l = float('inf')
def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
# wordList = set(wordList)
# if endWord not in wordList:
# return []
# ans = []
# def dfs(curr, wordList, path):
# if curr == endWord and path + [curr] not in ans and len(path) + 1 <= self.l:
# ans.append(path + [curr])
# self.l = len(path) + 1
# elif sum([1 if curr[i] != endWord[i] else 0 for i in range(len(curr))]) == 1 and path + [curr, endWord] not in ans and len(path) + 2 <= self.l:
# ans.append(path + [curr, endWord])
# self.l = len(path) + 2
# else:
# for word in wordList:
# diff = [1 if curr[i] != word[i] else 0 for i in range(len(curr))]
# if sum(diff) == 1:
# tmp = [x for x in wordList]
# tmp.remove(word)
# dfs(word, tmp, path + [curr])
# dfs(beginWord, wordList, [])
# result = []
# for path in ans:
# if len(path) == self.l:
# result.append(path)
# return result
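        # Layered BFS: each layer maps a word to every shortest path reaching it.
        # Words found in a layer are removed from wordList afterwards, so each
        # word is only visited at its minimum depth; when endWord appears in a
        # layer, the collected paths are exactly the shortest ladders.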
if not endWord or not beginWord or endWord not in wordList or not wordList:
return []
wordList = set(wordList)
res = []
layer = {}
layer[beginWord] = [[beginWord]]
while layer:
newlayer = collections.defaultdict(list)
for w in layer:
if w == endWord:
res.extend(k for k in layer[w])
else:
for i in range(len(w)):
for c in 'abcdefghijklmnopqrstuvwxyz':
neww = w[:i]+c+w[i+1:]
if neww in wordList:
newlayer[neww]+=[j+[neww] for j in layer[w]]
wordList -= set(newlayer.keys())
layer = newlayer
return res
| [
"[email protected]"
] | |
8b29bf46fef31ffb57cdaf9a8c463b8d3377add4 | ab9de9d522d9f50a29fd5b7a59bced5add5c588b | /zoom_api/migrations/versions/c358b3b57073_added_required_tables.py | 2ef4ddfa4eb8d57d410605b440c7c06a905bab61 | [] | no_license | DmytroKaminskiy/booksharing | c97d473547109af16b58d25d6a2183493a8f17ae | 26c89a0954d07c1c9d128d05538eff879a061d2f | refs/heads/main | 2023-04-08T13:55:26.430532 | 2021-04-22T18:34:39 | 2021-04-22T18:34:39 | 330,433,074 | 0 | 0 | null | 2021-01-24T15:17:54 | 2021-01-17T16:19:35 | Python | UTF-8 | Python | false | false | 561 | py | """Added required tables
Revision ID: c358b3b57073
Revises: ddbbb5334900
Create Date: 2021-04-15 18:31:39.907841
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c358b3b57073'
down_revision = 'ddbbb5334900'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| [
"[email protected]"
] | |
c27b701be44617207b94395a37a36f5e6ab2037f | 484a348682d9fa515666b94a5cd3a13b1b725a9e | /Leetcode/最近最少使用-缓存机制.py | 995ecc50910ddde2ceeae5df99c69464c1689d74 | [] | no_license | joseph-mutu/Codes-of-Algorithms-and-Data-Structure | 1a73772825c3895419d86d6f1f506d58617f3ff0 | d62591683d0e2a14c72cdc64ae1a36532c3b33db | refs/heads/master | 2020-12-29T17:01:55.097518 | 2020-04-15T19:25:43 | 2020-04-15T19:25:43 | 238,677,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,999 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-02-04 11:32:08
# @Author : mutudeh ([email protected])
# @Link : ${link}
# @Version : $Id$
import os
'''
1. 当 put 一个键值对的时候,如果已经存在相应的键,则重写该值
2. 当 get 一个键时,将相应的节点提取到 head 之后
3. 一个 Hash 表中键为 key (一个值),其存储的即为双向链表中的节点地址
'''
class ListNode(object):
def __init__(self,key = None, value = None):
self.key = key
self.value = value
self.next = None
self.prev = None
class LRUCache(object):
def __init__(self, capacity):
"""
:type capacity: int
"""
self.capacity = capacity
self.hashmap = {}
self.head = ListNode(-1)
self.tail = ListNode(-1)
self.head.next = self.tail
self.tail.prev = self.head
def get(self, key):
"""
:type key: int
:rtype: int
"""
if self.hashmap.get(key,0):
cur_node = self.hashmap.get(key)
cur_node.next.prev = cur_node.prev
cur_node.prev.next = cur_node.next
tem_node = self.head.next
self.head.next = cur_node
cur_node.next = tem_node
cur_node.prev = self.head
tem_node.prev = cur_node
            # print('current node', cur_node.value)
return cur_node.value
else:
# print(-1)
return -1
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: None
"""
        # when the cache exceeds max capacity, delete the last node
        # before the tail and remove the corresponding dict entry
if not self.hashmap.get(key,0) and len(self.hashmap) >= self.capacity:
del_node = self.tail.prev
tem_node = del_node.prev
tem_node.next = self.tail
self.tail.prev = tem_node
tem_key = del_node.key
# print('del_node',del_node.value)
del self.hashmap[tem_key]
del del_node
if self.hashmap.get(key,0):
cur_node = self.hashmap.get(key)
cur_node.value = value
cur_node.next.prev = cur_node.prev
cur_node.prev.next = cur_node.next
else:
cur_node = ListNode(key,value)
self.hashmap[key] = cur_node
tem_node = self.head.next
self.head.next = cur_node
cur_node.next = tem_node
cur_node.prev = self.head
tem_node.prev = cur_node
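# Both get and put run in O(1): the hash map locates a node directly, and the
# doubly linked list supports constant-time unlink/relink next to head.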
cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
cache.get(1)    # returns 1
cache.put(3, 3) # this operation evicts key 2
cache.get(2)    # returns -1 (not found)
cache.put(4, 4) # this operation evicts key 1
cache.get(1)    # returns -1 (not found)
cache.get(3)    # returns 3
cache.get(4)    # returns 4
| [
"[email protected]"
] | |
a30ff5b0bb92c54ed0b0a2e6332f0b6d13fcba74 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq1553.py | 7ea844d2f64eef952d9421759e00decb9d0d2c5e | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,374 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=64
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=29
c.append(cirq.CZ.on(input_qubit[3],input_qubit[1])) # number=30
c.append(cirq.H.on(input_qubit[1])) # number=31
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[4])) # number=21
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=38
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=39
c.append(cirq.H.on(input_qubit[0])) # number=40
c.append(cirq.H.on(input_qubit[0])) # number=51
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=52
c.append(cirq.H.on(input_qubit[0])) # number=53
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=48
c.append(cirq.X.on(input_qubit[0])) # number=49
c.append(cirq.H.on(input_qubit[0])) # number=57
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=58
c.append(cirq.H.on(input_qubit[0])) # number=59
c.append(cirq.H.on(input_qubit[0])) # number=54
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=55
c.append(cirq.H.on(input_qubit[0])) # number=56
c.append(cirq.H.on(input_qubit[4])) # number=41
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=37
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=61
c.append(cirq.X.on(input_qubit[1])) # number=62
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=63
c.append(cirq.H.on(input_qubit[2])) # number=25
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=26
c.append(cirq.H.on(input_qubit[2])) # number=27
c.append(cirq.X.on(input_qubit[2])) # number=23
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=24
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=32
c.append(cirq.X.on(input_qubit[3])) # number=33
c.append(cirq.H.on(input_qubit[3])) # number=42
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=43
c.append(cirq.H.on(input_qubit[3])) # number=44
c.append(cirq.X.on(input_qubit[0])) # number=13
c.append(cirq.rx(0.6157521601035993).on(input_qubit[1])) # number=60
c.append(cirq.X.on(input_qubit[1])) # number=14
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.X.on(input_qubit[3])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count = 2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq1553.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
6f609631be0bfde1bb461c37c628c17074c4b46e | b45d66c2c009d74b4925f07d0d9e779c99ffbf28 | /tests/unit_tests/economics_tests/test_helper_latest_econ.py | 49ac894caf61856731d392068233abe9b6b76693 | [] | no_license | erezrubinstein/aa | d96c0e39762fe7aaeeadebbd51c80b5e58576565 | a3f59ba59519183257ed9a731e8a1516a4c54b48 | refs/heads/master | 2021-03-12T23:44:56.319721 | 2016-09-18T23:01:17 | 2016-09-18T23:01:17 | 22,665,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,506 | py | from common.helpers.common_dependency_helper import register_common_mox_dependencies
from common.utilities.inversion_of_control import dependencies, Dependency
from economics.helpers.helpers import get_latest_econ_month
import datetime
import unittest
import mox
__author__ = 'jsternberg'
class EconomicsHelperLatestEconTests(mox.MoxTestBase):
def setUp(self):
super(EconomicsHelperLatestEconTests, self).setUp()
# set up mocks
register_common_mox_dependencies(self.mox)
self.mock_main_access = Dependency("CoreAPIProvider").value
self.main_param = Dependency("CoreAPIParamsBuilder").value
self.context = {
"user": "Alfred E. Neuman",
"source": "What? Me worry?"
}
def tearDown(self):
# remove dependencies for next set of tests
dependencies.clear()
def test_get_latest_econ_month__basic(self):
self.mox.StubOutWithMock(self.mock_main_access.mds, "call_find_entities_raw")
query = {}
fields = ["data.econ_count_by_date"]
sort = [["data.rds_file_id", -1]]
params = self.main_param.mds.create_params(resource="find_entities_raw", query=query, entity_fields=fields,
sort=sort, limit=1)["params"]
mock_stats = [
{
"data": {
"econ_count_by_date": [
{
"count": 198484,
"date": "2014-01-01T00:00:00"
},
{
"count": 4860,
"date": 2013
},
{
"count": 198448,
"date": "2013-12-01T00:00:00"
},
{
"count": 198448,
"date": "2013-11-01T00:00:00"
},
{
"count": 198448,
"date": "2013-10-01T00:00:00"
}
]
}
}
]
self.mock_main_access.mds.call_find_entities_raw("econ_stats", params, context=self.context,
encode_and_decode_results=False).AndReturn(mock_stats)
# replay mode
self.mox.ReplayAll()
expected = datetime.datetime(2014, 1, 1)
latest = get_latest_econ_month(self.main_param, self.mock_main_access, context=self.context)
self.assertEqual(latest, expected)
def test_get_latest_econ_month__real_dates(self):
self.mox.StubOutWithMock(self.mock_main_access.mds, "call_find_entities_raw")
query = {}
fields = ["data.econ_count_by_date"]
sort = [["data.rds_file_id", -1]]
params = self.main_param.mds.create_params(resource="find_entities_raw", query=query, entity_fields=fields,
sort=sort, limit=1)["params"]
mock_stats = [
{
"data": {
"econ_count_by_date": [
{
"count": 198484,
"date": datetime.datetime(2014, 1, 1)
},
{
"count": 4860,
"date": 2013
},
{
"count": 198448,
"date": datetime.datetime(2013, 12, 1)
},
{
"count": 198448,
"date": datetime.datetime(2013, 11, 1)
},
{
"count": 198448,
"date": datetime.datetime(2013, 10, 1)
}
]
}
}
]
self.mock_main_access.mds.call_find_entities_raw("econ_stats", params, context=self.context,
encode_and_decode_results=False).AndReturn(mock_stats)
# replay mode
self.mox.ReplayAll()
expected = datetime.datetime(2014, 1, 1)
latest = get_latest_econ_month(self.main_param, self.mock_main_access, context=self.context)
self.assertEqual(latest, expected)
def test_get_latest_econ_month__latest_month_incomplete(self):
self.mox.StubOutWithMock(self.mock_main_access.mds, "call_find_entities_raw")
query = {}
fields = ["data.econ_count_by_date"]
sort = [["data.rds_file_id", -1]]
params = self.main_param.mds.create_params(resource="find_entities_raw", query=query, entity_fields=fields,
sort=sort, limit=1)["params"]
mock_stats = [
{
"data": {
"econ_count_by_date": [
{
"count": 180000,
"date": datetime.datetime(2014, 1, 1)
},
{
"count": 4860,
"date": 2013
},
{
"count": 198448,
"date": datetime.datetime(2013, 12, 1)
},
{
"count": 198448,
"date": datetime.datetime(2013, 11, 1)
},
{
"count": 198448,
"date": datetime.datetime(2013, 10, 1)
}
]
}
}
]
self.mock_main_access.mds.call_find_entities_raw("econ_stats", params, context=self.context,
encode_and_decode_results=False).AndReturn(mock_stats)
# replay mode
self.mox.ReplayAll()
expected = datetime.datetime(2013, 12, 1)
latest = get_latest_econ_month(self.main_param, self.mock_main_access, context=self.context)
self.assertEqual(latest, expected)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a29ecf15be0d4978523be8694a1b92871b614daf | b21051c06de442684cf7573780c14ec2384c1d0a | /webrecorder/webrecorder/logincontroller.py | 8285944f7910441479c1288ac50fc4498ec07dc0 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | italoadler/webrecorder | 47645d318b4303631b064bc8bb3f3a530f81b2b3 | 637214afe6246572ed644ec9c426e9356a0f5231 | refs/heads/master | 2021-01-15T21:49:20.094575 | 2016-09-09T23:43:34 | 2016-09-09T23:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,527 | py | from bottle import request
from os.path import expandvars
from webrecorder.webreccork import ValidationException
from webrecorder.basecontroller import BaseController
import json
# ============================================================================
LOGIN_PATH = '/_login'
LOGIN_MODAL_PATH = '/_login_modal'
LOGOUT_PATH = '/_logout'
CREATE_PATH = '/_create'
REGISTER_PATH = '/_register'
VAL_REG_PATH = '/_valreg/<reg>'
INVITE_PATH = '/_invite'
FORGOT_PATH = '/_forgot'
RESET_POST = '/_resetpassword'
RESET_PATH = '/_resetpassword/<resetcode>'
RESET_PATH_FILL = '/_resetpassword/{0}?username={1}'
UPDATE_PASS_PATH = '/_updatepassword'
SETTINGS = '/_settings'
# ============================================================================
class LoginController(BaseController):
def __init__(self, *args, **kwargs):
config = kwargs.get('config')
invites = expandvars(config.get('invites_enabled', 'true')).lower()
self.invites_enabled = invites in ('true', '1', 'yes')
super(LoginController, self).__init__(*args, **kwargs)
def init_routes(self):
# Login/Logout
# ============================================================================
@self.app.get(LOGIN_PATH)
@self.jinja2_view('login.html')
def login():
self.redirect_home_if_logged_in()
resp = {}
self.fill_anon_info(resp)
return resp
@self.app.get(LOGIN_MODAL_PATH)
@self.jinja2_view('login_modal.html')
def login_modal():
#self.redirect_home_if_logged_in()
resp = {}
self.fill_anon_info(resp)
return resp
@self.app.post(LOGIN_PATH)
def login_post():
self.redirect_home_if_logged_in()
"""Authenticate users"""
username = self.post_get('username')
password = self.post_get('password')
try:
move_info = self.get_move_temp_info()
except ValidationException as ve:
self.flash_message('Login Failed: ' + str(ve))
self.redirect('/')
return
# if a collection is being moved, auth user
# and then check for available space
# if not enough space, don't continue with login
if move_info and (self.manager.cork.
is_authenticate(username, password)):
if not self.manager.has_space_for_new_coll(username,
move_info['from_user'],
'temp'):
self.flash_message('Sorry, not enough space to import this Temporary Collection into your account.')
self.redirect('/')
return
if self.manager.cork.login(username, password):
sesh = self.get_session()
sesh.curr_user = username
if move_info:
try:
new_title = self.manager.move_temp_coll(username, move_info)
if new_title:
self.flash_message('Collection <b>{0}</b> created!'.format(new_title), 'success')
except:
import traceback
traceback.print_exc()
remember_me = (self.post_get('remember_me') == '1')
sesh.logged_in(remember_me)
redir_to = request.headers.get('Referer')
host = self.get_host()
temp_prefix = self.manager.temp_prefix
if not redir_to or redir_to.startswith((host + '/' + temp_prefix,
host + '/_')):
redir_to = self.get_path(username)
else:
self.flash_message('Invalid Login. Please Try Again')
redir_to = LOGIN_PATH
self.redirect(redir_to)
@self.app.get(LOGOUT_PATH)
def logout():
redir_to = '/'
self.manager.cork.logout(success_redirect=redir_to, fail_redirect=redir_to)
# Register/Invite/Confirm
# ============================================================================
@self.app.get(REGISTER_PATH)
@self.jinja2_view('register.html')
def register():
self.redirect_home_if_logged_in()
if not self.invites_enabled:
resp = {'email': '',
'skip_invite': True}
self.fill_anon_info(resp)
return resp
invitecode = request.query.getunicode('invite', '')
email = ''
try:
email = self.manager.is_valid_invite(invitecode)
except ValidationException as ve:
self.flash_message(str(ve))
return { 'email': email,
'invite': invitecode}
@self.app.post(INVITE_PATH)
def invite_post():
self.redirect_home_if_logged_in()
email = self.post_get('email')
name = self.post_get('name')
desc = self.post_get('desc')
if self.manager.save_invite(email, name, desc):
self.flash_message('Thank you for your interest! We will send you an invite to try webrecorder.io soon!', 'success')
self.redirect('/')
else:
self.flash_message('Oops, something went wrong, please try again')
self.redirect(REGISTER_PATH)
@self.app.post(REGISTER_PATH)
def register_post():
self.redirect_home_if_logged_in()
email = self.post_get('email')
username = self.post_get('username')
password = self.post_get('password')
name = self.post_get('name')
confirm_password = self.post_get('confirmpassword')
invitecode = self.post_get('invite')
redir_to = REGISTER_PATH
if username.startswith(self.manager.temp_prefix):
self.flash_message('Sorry, this is not a valid username')
self.redirect(redir_to)
return
try:
move_info = self.get_move_temp_info()
except ValidationException as ve:
self.flash_message('Registration Failed: ' + str(ve))
self.redirect('/')
return
if self.invites_enabled:
try:
val_email = self.manager.is_valid_invite(invitecode)
if val_email != email:
raise ValidationException('Sorry, this invite can only be used with email: {0}'.format(val_email))
except ValidationException as ve:
self.flash_message(str(ve))
self.redirect(redir_to)
return
redir_to += '?invite=' + invitecode
try:
self.manager.validate_user(username, email)
self.manager.validate_password(password, confirm_password)
#TODO: set default host?
host = self.get_host()
desc = {'name': name}
if move_info:
desc['move_info'] = move_info
desc = json.dumps(desc)
self.manager.cork.register(username, password, email, role='archivist',
max_level=50,
subject='webrecorder.io Account Creation',
email_template='templates/emailconfirm.html',
description=desc,
host=host)
self.flash_message('A confirmation e-mail has been sent to <b>{0}</b>. \
Please check your e-mail to complete the registration!'.format(username), 'warning')
redir_to = '/'
if self.invites_enabled:
self.manager.delete_invite(email)
except ValidationException as ve:
self.flash_message(str(ve))
except Exception as ex:
self.flash_message('Registration failed: ' + str(ex))
self.redirect(redir_to)
# Validate Registration
@self.app.get(VAL_REG_PATH)
def val_reg(reg):
self.redirect_home_if_logged_in()
try:
username, first_coll = self.manager.create_user(reg)
#self.flash_message('<b>{0}</b>, welcome to your new archive home page! \
#Click the <b>Create New Collection</b> button to create your first collection. Happy Archiving!'.format(username), 'success')
#redir_to = '/' + username
msg = '<b>{0}</b>, you are now logged in!'
if first_coll == 'Default Collection':
msg += ' The <b>{1}</b> collection has been created for you, and you can begin recording by entering a url below!'
else:
msg += ' The <b>{1}</b> collection has been permanently saved for you, and you can continue recording by entering a url below!'
self.flash_message(msg.format(username, first_coll), 'success')
redir_to = '/'
except ValidationException:
                self.flash_message('This user is already registered. \
If this is you, please login or click forgot password, \
or register a new account.')
redir_to = LOGIN_PATH
except Exception as e:
import traceback
traceback.print_exc()
self.flash_message('Sorry, this is not a valid registration code. Please try again.')
redir_to = REGISTER_PATH
self.redirect(redir_to)
# Forgot Password
# ============================================================================
@self.app.get(FORGOT_PATH)
@self.jinja2_view('forgot.html')
def forgot():
self.redirect_home_if_logged_in()
return {}
@self.app.post(FORGOT_PATH)
def forgot_submit():
self.redirect_home_if_logged_in()
email = self.post_get('email')
username = self.post_get('username')
host = self.get_host()
try:
self.manager.cork.send_password_reset_email(username=username,
email_addr=email,
subject='webrecorder.io password reset confirmation',
email_template='templates/emailreset.html',
host=host)
self.flash_message('A password reset e-mail has been sent to your e-mail!', 'success')
redir_to = '/'
except Exception as e:
self.flash_message(str(e))
redir_to = FORGOT_PATH
self.redirect(redir_to)
# Reset Password
# ============================================================================
@self.app.get(RESET_PATH)
@self.jinja2_view('reset.html')
def resetpass(resetcode):
self.redirect_home_if_logged_in()
try:
username = request.query['username']
result = {'username': username,
'resetcode': resetcode}
except Exception as e:
print(e)
self.flash_message('Invalid password reset attempt. Please try again')
self.redirect(FORGOT_PATH)
return result
@self.app.post(RESET_POST)
def do_reset():
self.redirect_home_if_logged_in()
username = self.post_get('username')
resetcode = self.post_get('resetcode')
password = self.post_get('password')
confirm_password = self.post_get('confirmpassword')
try:
self.manager.validate_password(password, confirm_password)
self.manager.cork.reset_password(resetcode, password)
self.flash_message('Your password has been successfully reset! \
You can now <b>login</b> with your new password!', 'success')
redir_to = LOGIN_PATH
except ValidationException as ve:
self.flash_message(str(ve))
redir_to = RESET_PATH_FILL.format(resetcode, username)
except Exception as e:
self.flash_message('Invalid password reset attempt. Please try again')
redir_to = FORGOT_PATH
self.redirect(redir_to)
# Update Password
@self.app.post(UPDATE_PASS_PATH)
def update_password():
self.redirect_home_if_logged_in()
self.manager.cork.require(role='archivist', fail_redirect=LOGIN_PATH)
curr_password = self.post_get('curr_password')
password = self.post_get('password')
confirm_password = self.post_get('confirmpassword')
try:
self.manager.update_password(curr_password, password, confirm_password)
self.flash_message('Password Updated', 'success')
except ValidationException as ve:
self.flash_message(str(ve))
user = self.manager.get_curr_user()
self.redirect(self.get_path(user) + SETTINGS)
def redirect_home_if_logged_in(self):
sesh = self.get_session()
if sesh.curr_user:
self.flash_message('You are already logged in as <b>{0}</b>'.format(sesh.curr_user))
self.redirect('/')
def get_move_temp_info(self):
move_info = None
move_temp = self.post_get('move-temp')
if move_temp == '1':
to_coll_title = self.post_get('to-coll')
to_coll = self.sanitize_title(to_coll_title)
if not to_coll:
raise ValidationException('Invalid new collection name, please pick a different name')
sesh = self.get_session()
if sesh.is_anon() and to_coll:
move_info = {'from_user': sesh.anon_user,
'to_coll': to_coll,
'to_title': to_coll_title,
}
return move_info
| [
"[email protected]"
] | |
982cf2b15a858f104cd7853917e5d7ef1ccfe09c | 130215e73cd45824fc5b7b2bc85949ce03115f20 | /py/fo7_2.py | d19952ac6b19f7b1f3b6dd96d2c5b240d94f06aa | [] | no_license | felicitygong/MINLPinstances | 062634bf709a782a860234ec2daa7e6bf374371e | 1cd9c799c5758baa0818394c07adea84659c064c | refs/heads/master | 2022-12-06T11:58:14.141832 | 2022-12-01T17:17:35 | 2022-12-01T17:17:35 | 119,295,560 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 23,074 | py | # MINLP written by GAMS Convert at 11/10/17 15:35:21
#
# Equation counts
# Total E G L N X C B
# 212 1 0 211 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 115 73 42 0 0 0 0 0
# FX 2 2 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 869 855 14 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
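# Solve sketch (not part of the GAMS-converted file; assumes a MINLP-capable
# solver such as BONMIN or Couenne is installed and on PATH):
#   SolverFactory('bonmin').solve(m, tee=True)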
m.b1 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b3 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b4 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b5 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b6 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b7 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b8 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b9 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b10 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b11 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b12 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b13 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b14 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b15 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b16 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b17 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b18 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b19 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b20 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b21 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b22 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b23 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b24 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b25 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b26 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b27 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b28 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b29 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b30 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b31 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b32 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b33 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b34 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b35 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b36 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b37 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b38 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b39 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b40 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b41 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b42 = Var(within=Binary,bounds=(0,1),initialize=0)
m.x44 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x86 = Var(within=Reals,bounds=(1.7889,8.54),initialize=1.7889)
m.x87 = Var(within=Reals,bounds=(1.7889,8.54),initialize=1.7889)
m.x88 = Var(within=Reals,bounds=(1.7889,8.54),initialize=1.7889)
m.x89 = Var(within=Reals,bounds=(2.7692,8.5399),initialize=2.7692)
m.x90 = Var(within=Reals,bounds=(1.3416,6.7082),initialize=1.3416)
m.x91 = Var(within=Reals,bounds=(1.3416,6.7082),initialize=1.3416)
m.x92 = Var(within=Reals,bounds=(1.3416,6.7082),initialize=1.3416)
m.x93 = Var(within=Reals,bounds=(8.54,8.54),initialize=8.54)
m.x94 = Var(within=Reals,bounds=(1.8735,8.944),initialize=1.8735)
m.x95 = Var(within=Reals,bounds=(1.8735,8.944),initialize=1.8735)
m.x96 = Var(within=Reals,bounds=(1.8735,8.944),initialize=1.8735)
m.x97 = Var(within=Reals,bounds=(4.2155,13),initialize=4.2155)
m.x98 = Var(within=Reals,bounds=(1.3416,6.7082),initialize=1.3416)
m.x99 = Var(within=Reals,bounds=(1.3416,6.7082),initialize=1.3416)
m.x100 = Var(within=Reals,bounds=(1.3416,6.7082),initialize=1.3416)
m.x101 = Var(within=Reals,bounds=(13,13),initialize=13)
m.x102 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x103 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x104 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x105 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x106 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x107 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x108 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x109 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x110 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x111 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x112 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x113 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x114 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x115 = Var(within=Reals,bounds=(None,None),initialize=0)
m.obj = Objective(expr= m.x44 + m.x45 + m.x56 + m.x57 + m.x66 + m.x67 + m.x74 + m.x75 + m.x80 + m.x81 + m.x84 + m.x85
, sense=minimize)
m.c2 = Constraint(expr= m.x102 - m.x103 <= 0)
m.c3 = Constraint(expr= 0.5*m.x86 - m.x93 + m.x102 <= 0)
m.c4 = Constraint(expr= 0.5*m.x86 - m.x102 <= 0)
m.c5 = Constraint(expr= 0.5*m.x94 - m.x101 + m.x109 <= 0)
m.c6 = Constraint(expr= 0.5*m.x94 - m.x109 <= 0)
m.c7 = Constraint(expr= 0.5*m.x87 - m.x93 + m.x103 <= 0)
m.c8 = Constraint(expr= 0.5*m.x87 - m.x103 <= 0)
m.c9 = Constraint(expr= 0.5*m.x95 - m.x101 + m.x110 <= 0)
m.c10 = Constraint(expr= 0.5*m.x95 - m.x110 <= 0)
m.c11 = Constraint(expr= 0.5*m.x88 - m.x93 + m.x104 <= 0)
m.c12 = Constraint(expr= 0.5*m.x88 - m.x104 <= 0)
m.c13 = Constraint(expr= 0.5*m.x96 - m.x101 + m.x111 <= 0)
m.c14 = Constraint(expr= 0.5*m.x96 - m.x111 <= 0)
m.c15 = Constraint(expr= 0.5*m.x89 - m.x93 + m.x105 <= 0)
m.c16 = Constraint(expr= 0.5*m.x89 - m.x105 <= 0)
m.c17 = Constraint(expr= 0.5*m.x97 - m.x101 + m.x112 <= 0)
m.c18 = Constraint(expr= 0.5*m.x97 - m.x112 <= 0)
m.c19 = Constraint(expr= 0.5*m.x90 - m.x93 + m.x106 <= 0)
m.c20 = Constraint(expr= 0.5*m.x90 - m.x106 <= 0)
m.c21 = Constraint(expr= 0.5*m.x98 - m.x101 + m.x113 <= 0)
m.c22 = Constraint(expr= 0.5*m.x98 - m.x113 <= 0)
m.c23 = Constraint(expr= 0.5*m.x91 - m.x93 + m.x107 <= 0)
m.c24 = Constraint(expr= 0.5*m.x91 - m.x107 <= 0)
m.c25 = Constraint(expr= 0.5*m.x99 - m.x101 + m.x114 <= 0)
m.c26 = Constraint(expr= 0.5*m.x99 - m.x114 <= 0)
m.c27 = Constraint(expr= 0.5*m.x92 - m.x93 + m.x108 <= 0)
m.c28 = Constraint(expr= 0.5*m.x92 - m.x108 <= 0)
m.c29 = Constraint(expr= 0.5*m.x100 - m.x101 + m.x115 <= 0)
m.c30 = Constraint(expr= 0.5*m.x100 - m.x115 <= 0)
m.c31 = Constraint(expr= - m.x44 + m.x102 - m.x103 <= 0)
m.c32 = Constraint(expr= - m.x44 - m.x102 + m.x103 <= 0)
m.c33 = Constraint(expr= - m.x45 + m.x109 - m.x110 <= 0)
m.c34 = Constraint(expr= - m.x45 - m.x109 + m.x110 <= 0)
m.c35 = Constraint(expr= - 8.54*m.b1 - 8.54*m.b2 + 0.5*m.x86 + 0.5*m.x87 - m.x102 + m.x103 <= 0)
m.c36 = Constraint(expr= - 8.54*m.b1 + 8.54*m.b2 + 0.5*m.x86 + 0.5*m.x87 + m.x102 - m.x103 <= 8.54)
m.c37 = Constraint(expr= 13*m.b1 - 13*m.b2 + 0.5*m.x94 + 0.5*m.x95 - m.x109 + m.x110 <= 13)
m.c38 = Constraint(expr= 13*m.b1 + 13*m.b2 + 0.5*m.x94 + 0.5*m.x95 + m.x109 - m.x110 <= 26)
m.c39 = Constraint(expr= - m.x46 + m.x102 - m.x104 <= 0)
m.c40 = Constraint(expr= - m.x46 - m.x102 + m.x104 <= 0)
m.c41 = Constraint(expr= - m.x47 + m.x109 - m.x111 <= 0)
m.c42 = Constraint(expr= - m.x47 - m.x109 + m.x111 <= 0)
m.c43 = Constraint(expr= - 8.54*m.b3 - 8.54*m.b4 + 0.5*m.x86 + 0.5*m.x88 - m.x102 + m.x104 <= 0)
m.c44 = Constraint(expr= - 8.54*m.b3 + 8.54*m.b4 + 0.5*m.x86 + 0.5*m.x88 + m.x102 - m.x104 <= 8.54)
m.c45 = Constraint(expr= 13*m.b3 - 13*m.b4 + 0.5*m.x94 + 0.5*m.x96 - m.x109 + m.x111 <= 13)
m.c46 = Constraint(expr= 13*m.b3 + 13*m.b4 + 0.5*m.x94 + 0.5*m.x96 + m.x109 - m.x111 <= 26)
m.c47 = Constraint(expr= - m.x48 + m.x102 - m.x105 <= 0)
m.c48 = Constraint(expr= - m.x48 - m.x102 + m.x105 <= 0)
m.c49 = Constraint(expr= - m.x49 + m.x109 - m.x112 <= 0)
m.c50 = Constraint(expr= - m.x49 - m.x109 + m.x112 <= 0)
m.c51 = Constraint(expr= - 8.54*m.b5 - 8.54*m.b6 + 0.5*m.x86 + 0.5*m.x89 - m.x102 + m.x105 <= 0)
m.c52 = Constraint(expr= - 8.54*m.b5 + 8.54*m.b6 + 0.5*m.x86 + 0.5*m.x89 + m.x102 - m.x105 <= 8.54)
m.c53 = Constraint(expr= 13*m.b5 - 13*m.b6 + 0.5*m.x94 + 0.5*m.x97 - m.x109 + m.x112 <= 13)
m.c54 = Constraint(expr= 13*m.b5 + 13*m.b6 + 0.5*m.x94 + 0.5*m.x97 + m.x109 - m.x112 <= 26)
m.c55 = Constraint(expr= - m.x50 + m.x102 - m.x106 <= 0)
m.c56 = Constraint(expr= - m.x50 - m.x102 + m.x106 <= 0)
m.c57 = Constraint(expr= - m.x51 + m.x109 - m.x113 <= 0)
m.c58 = Constraint(expr= - m.x51 - m.x109 + m.x113 <= 0)
m.c59 = Constraint(expr= - 8.54*m.b7 - 8.54*m.b8 + 0.5*m.x86 + 0.5*m.x90 - m.x102 + m.x106 <= 0)
m.c60 = Constraint(expr= - 8.54*m.b7 + 8.54*m.b8 + 0.5*m.x86 + 0.5*m.x90 + m.x102 - m.x106 <= 8.54)
m.c61 = Constraint(expr= 13*m.b7 - 13*m.b8 + 0.5*m.x94 + 0.5*m.x98 - m.x109 + m.x113 <= 13)
m.c62 = Constraint(expr= 13*m.b7 + 13*m.b8 + 0.5*m.x94 + 0.5*m.x98 + m.x109 - m.x113 <= 26)
m.c63 = Constraint(expr= - m.x52 + m.x102 - m.x107 <= 0)
m.c64 = Constraint(expr= - m.x52 - m.x102 + m.x107 <= 0)
m.c65 = Constraint(expr= - m.x53 + m.x109 - m.x114 <= 0)
m.c66 = Constraint(expr= - m.x53 - m.x109 + m.x114 <= 0)
m.c67 = Constraint(expr= - 8.54*m.b9 - 8.54*m.b10 + 0.5*m.x86 + 0.5*m.x91 - m.x102 + m.x107 <= 0)
m.c68 = Constraint(expr= - 8.54*m.b9 + 8.54*m.b10 + 0.5*m.x86 + 0.5*m.x91 + m.x102 - m.x107 <= 8.54)
m.c69 = Constraint(expr= 13*m.b9 - 13*m.b10 + 0.5*m.x94 + 0.5*m.x99 - m.x109 + m.x114 <= 13)
m.c70 = Constraint(expr= 13*m.b9 + 13*m.b10 + 0.5*m.x94 + 0.5*m.x99 + m.x109 - m.x114 <= 26)
m.c71 = Constraint(expr= - m.x54 + m.x102 - m.x108 <= 0)
m.c72 = Constraint(expr= - m.x54 - m.x102 + m.x108 <= 0)
m.c73 = Constraint(expr= - m.x55 + m.x109 - m.x115 <= 0)
m.c74 = Constraint(expr= - m.x55 - m.x109 + m.x115 <= 0)
m.c75 = Constraint(expr= - 8.54*m.b11 - 8.54*m.b12 + 0.5*m.x86 + 0.5*m.x92 - m.x102 + m.x108 <= 0)
m.c76 = Constraint(expr= - 8.54*m.b11 + 8.54*m.b12 + 0.5*m.x86 + 0.5*m.x92 + m.x102 - m.x108 <= 8.54)
m.c77 = Constraint(expr= 13*m.b11 - 13*m.b12 + 0.5*m.x94 + 0.5*m.x100 - m.x109 + m.x115 <= 13)
m.c78 = Constraint(expr= 13*m.b11 + 13*m.b12 + 0.5*m.x94 + 0.5*m.x100 + m.x109 - m.x115 <= 26)
m.c79 = Constraint(expr= - m.x56 + m.x103 - m.x104 <= 0)
m.c80 = Constraint(expr= - m.x56 - m.x103 + m.x104 <= 0)
m.c81 = Constraint(expr= - m.x57 + m.x110 - m.x111 <= 0)
m.c82 = Constraint(expr= - m.x57 - m.x110 + m.x111 <= 0)
m.c83 = Constraint(expr= - 8.54*m.b13 - 8.54*m.b14 + 0.5*m.x87 + 0.5*m.x88 - m.x103 + m.x104 <= 0)
m.c84 = Constraint(expr= - 8.54*m.b13 + 8.54*m.b14 + 0.5*m.x87 + 0.5*m.x88 + m.x103 - m.x104 <= 8.54)
m.c85 = Constraint(expr= 13*m.b13 - 13*m.b14 + 0.5*m.x95 + 0.5*m.x96 - m.x110 + m.x111 <= 13)
m.c86 = Constraint(expr= 13*m.b13 + 13*m.b14 + 0.5*m.x95 + 0.5*m.x96 + m.x110 - m.x111 <= 26)
m.c87 = Constraint(expr= - m.x58 + m.x103 - m.x105 <= 0)
m.c88 = Constraint(expr= - m.x58 - m.x103 + m.x105 <= 0)
m.c89 = Constraint(expr= - m.x59 + m.x110 - m.x112 <= 0)
m.c90 = Constraint(expr= - m.x59 - m.x110 + m.x112 <= 0)
m.c91 = Constraint(expr= - 8.54*m.b15 - 8.54*m.b16 + 0.5*m.x87 + 0.5*m.x89 - m.x103 + m.x105 <= 0)
m.c92 = Constraint(expr= - 8.54*m.b15 + 8.54*m.b16 + 0.5*m.x87 + 0.5*m.x89 + m.x103 - m.x105 <= 8.54)
m.c93 = Constraint(expr= 13*m.b15 - 13*m.b16 + 0.5*m.x95 + 0.5*m.x97 - m.x110 + m.x112 <= 13)
m.c94 = Constraint(expr= 13*m.b15 + 13*m.b16 + 0.5*m.x95 + 0.5*m.x97 + m.x110 - m.x112 <= 26)
m.c95 = Constraint(expr= - m.x60 + m.x103 - m.x106 <= 0)
m.c96 = Constraint(expr= - m.x60 - m.x103 + m.x106 <= 0)
m.c97 = Constraint(expr= - m.x61 + m.x110 - m.x113 <= 0)
m.c98 = Constraint(expr= - m.x61 - m.x110 + m.x113 <= 0)
m.c99 = Constraint(expr= - 8.54*m.b17 - 8.54*m.b18 + 0.5*m.x87 + 0.5*m.x90 - m.x103 + m.x106 <= 0)
m.c100 = Constraint(expr= - 8.54*m.b17 + 8.54*m.b18 + 0.5*m.x87 + 0.5*m.x90 + m.x103 - m.x106 <= 8.54)
m.c101 = Constraint(expr= 13*m.b17 - 13*m.b18 + 0.5*m.x95 + 0.5*m.x98 - m.x110 + m.x113 <= 13)
m.c102 = Constraint(expr= 13*m.b17 + 13*m.b18 + 0.5*m.x95 + 0.5*m.x98 + m.x110 - m.x113 <= 26)
m.c103 = Constraint(expr= - m.x62 + m.x103 - m.x107 <= 0)
m.c104 = Constraint(expr= - m.x62 - m.x103 + m.x107 <= 0)
m.c105 = Constraint(expr= - m.x63 + m.x110 - m.x114 <= 0)
m.c106 = Constraint(expr= - m.x63 - m.x110 + m.x114 <= 0)
m.c107 = Constraint(expr= - 8.54*m.b19 - 8.54*m.b20 + 0.5*m.x87 + 0.5*m.x91 - m.x103 + m.x107 <= 0)
m.c108 = Constraint(expr= - 8.54*m.b19 + 8.54*m.b20 + 0.5*m.x87 + 0.5*m.x91 + m.x103 - m.x107 <= 8.54)
m.c109 = Constraint(expr= 13*m.b19 - 13*m.b20 + 0.5*m.x95 + 0.5*m.x99 - m.x110 + m.x114 <= 13)
m.c110 = Constraint(expr= 13*m.b19 + 13*m.b20 + 0.5*m.x95 + 0.5*m.x99 + m.x110 - m.x114 <= 26)
m.c111 = Constraint(expr= - m.x64 + m.x103 - m.x108 <= 0)
m.c112 = Constraint(expr= - m.x64 - m.x103 + m.x108 <= 0)
m.c113 = Constraint(expr= - m.x65 + m.x110 - m.x115 <= 0)
m.c114 = Constraint(expr= - m.x65 - m.x110 + m.x115 <= 0)
m.c115 = Constraint(expr= - 8.54*m.b21 - 8.54*m.b22 + 0.5*m.x87 + 0.5*m.x92 - m.x103 + m.x108 <= 0)
m.c116 = Constraint(expr= - 8.54*m.b21 + 8.54*m.b22 + 0.5*m.x87 + 0.5*m.x92 + m.x103 - m.x108 <= 8.54)
m.c117 = Constraint(expr= 13*m.b21 - 13*m.b22 + 0.5*m.x95 + 0.5*m.x100 - m.x110 + m.x115 <= 13)
m.c118 = Constraint(expr= 13*m.b21 + 13*m.b22 + 0.5*m.x95 + 0.5*m.x100 + m.x110 - m.x115 <= 26)
m.c119 = Constraint(expr= - m.x66 + m.x104 - m.x105 <= 0)
m.c120 = Constraint(expr= - m.x66 - m.x104 + m.x105 <= 0)
m.c121 = Constraint(expr= - m.x67 + m.x111 - m.x112 <= 0)
m.c122 = Constraint(expr= - m.x67 - m.x111 + m.x112 <= 0)
m.c123 = Constraint(expr= - 8.54*m.b23 - 8.54*m.b24 + 0.5*m.x88 + 0.5*m.x89 - m.x104 + m.x105 <= 0)
m.c124 = Constraint(expr= - 8.54*m.b23 + 8.54*m.b24 + 0.5*m.x88 + 0.5*m.x89 + m.x104 - m.x105 <= 8.54)
m.c125 = Constraint(expr= 13*m.b23 - 13*m.b24 + 0.5*m.x96 + 0.5*m.x97 - m.x111 + m.x112 <= 13)
m.c126 = Constraint(expr= 13*m.b23 + 13*m.b24 + 0.5*m.x96 + 0.5*m.x97 + m.x111 - m.x112 <= 26)
m.c127 = Constraint(expr= - m.x68 + m.x104 - m.x106 <= 0)
m.c128 = Constraint(expr= - m.x68 - m.x104 + m.x106 <= 0)
m.c129 = Constraint(expr= - m.x69 + m.x111 - m.x113 <= 0)
m.c130 = Constraint(expr= - m.x69 - m.x111 + m.x113 <= 0)
m.c131 = Constraint(expr= - 8.54*m.b25 - 8.54*m.b26 + 0.5*m.x88 + 0.5*m.x90 - m.x104 + m.x106 <= 0)
m.c132 = Constraint(expr= - 8.54*m.b25 + 8.54*m.b26 + 0.5*m.x88 + 0.5*m.x90 + m.x104 - m.x106 <= 8.54)
m.c133 = Constraint(expr= 13*m.b25 - 13*m.b26 + 0.5*m.x96 + 0.5*m.x98 - m.x111 + m.x113 <= 13)
m.c134 = Constraint(expr= 13*m.b25 + 13*m.b26 + 0.5*m.x96 + 0.5*m.x98 + m.x111 - m.x113 <= 26)
m.c135 = Constraint(expr= - m.x70 + m.x104 - m.x107 <= 0)
m.c136 = Constraint(expr= - m.x70 - m.x104 + m.x107 <= 0)
m.c137 = Constraint(expr= - m.x71 + m.x111 - m.x114 <= 0)
m.c138 = Constraint(expr= - m.x71 - m.x111 + m.x114 <= 0)
m.c139 = Constraint(expr= - 8.54*m.b27 - 8.54*m.b28 + 0.5*m.x88 + 0.5*m.x91 - m.x104 + m.x107 <= 0)
m.c140 = Constraint(expr= - 8.54*m.b27 + 8.54*m.b28 + 0.5*m.x88 + 0.5*m.x91 + m.x104 - m.x107 <= 8.54)
m.c141 = Constraint(expr= 13*m.b27 - 13*m.b28 + 0.5*m.x96 + 0.5*m.x99 - m.x111 + m.x114 <= 13)
m.c142 = Constraint(expr= 13*m.b27 + 13*m.b28 + 0.5*m.x96 + 0.5*m.x99 + m.x111 - m.x114 <= 26)
m.c143 = Constraint(expr= - m.x72 + m.x104 - m.x108 <= 0)
m.c144 = Constraint(expr= - m.x72 - m.x104 + m.x108 <= 0)
m.c145 = Constraint(expr= - m.x73 + m.x111 - m.x115 <= 0)
m.c146 = Constraint(expr= - m.x73 - m.x111 + m.x115 <= 0)
m.c147 = Constraint(expr= - 8.54*m.b29 - 8.54*m.b30 + 0.5*m.x88 + 0.5*m.x92 - m.x104 + m.x108 <= 0)
m.c148 = Constraint(expr= - 8.54*m.b29 + 8.54*m.b30 + 0.5*m.x88 + 0.5*m.x92 + m.x104 - m.x108 <= 8.54)
m.c149 = Constraint(expr= 13*m.b29 - 13*m.b30 + 0.5*m.x96 + 0.5*m.x100 - m.x111 + m.x115 <= 13)
m.c150 = Constraint(expr= 13*m.b29 + 13*m.b30 + 0.5*m.x96 + 0.5*m.x100 + m.x111 - m.x115 <= 26)
m.c151 = Constraint(expr= - m.x74 + m.x105 - m.x106 <= 0)
m.c152 = Constraint(expr= - m.x74 - m.x105 + m.x106 <= 0)
m.c153 = Constraint(expr= - m.x75 + m.x112 - m.x113 <= 0)
m.c154 = Constraint(expr= - m.x75 - m.x112 + m.x113 <= 0)
m.c155 = Constraint(expr= - 8.54*m.b31 - 8.54*m.b32 + 0.5*m.x89 + 0.5*m.x90 - m.x105 + m.x106 <= 0)
m.c156 = Constraint(expr= - 8.54*m.b31 + 8.54*m.b32 + 0.5*m.x89 + 0.5*m.x90 + m.x105 - m.x106 <= 8.54)
m.c157 = Constraint(expr= 13*m.b31 - 13*m.b32 + 0.5*m.x97 + 0.5*m.x98 - m.x112 + m.x113 <= 13)
m.c158 = Constraint(expr= 13*m.b31 + 13*m.b32 + 0.5*m.x97 + 0.5*m.x98 + m.x112 - m.x113 <= 26)
m.c159 = Constraint(expr= - m.x76 + m.x105 - m.x107 <= 0)
m.c160 = Constraint(expr= - m.x76 - m.x105 + m.x107 <= 0)
m.c161 = Constraint(expr= - m.x77 + m.x112 - m.x114 <= 0)
m.c162 = Constraint(expr= - m.x77 - m.x112 + m.x114 <= 0)
m.c163 = Constraint(expr= - 8.54*m.b33 - 8.54*m.b34 + 0.5*m.x89 + 0.5*m.x91 - m.x105 + m.x107 <= 0)
m.c164 = Constraint(expr= - 8.54*m.b33 + 8.54*m.b34 + 0.5*m.x89 + 0.5*m.x91 + m.x105 - m.x107 <= 8.54)
m.c165 = Constraint(expr= 13*m.b33 - 13*m.b34 + 0.5*m.x97 + 0.5*m.x99 - m.x112 + m.x114 <= 13)
m.c166 = Constraint(expr= 13*m.b33 + 13*m.b34 + 0.5*m.x97 + 0.5*m.x99 + m.x112 - m.x114 <= 26)
m.c167 = Constraint(expr= - m.x78 + m.x105 - m.x108 <= 0)
m.c168 = Constraint(expr= - m.x78 - m.x105 + m.x108 <= 0)
m.c169 = Constraint(expr= - m.x79 + m.x112 - m.x115 <= 0)
m.c170 = Constraint(expr= - m.x79 - m.x112 + m.x115 <= 0)
m.c171 = Constraint(expr= - 8.54*m.b35 - 8.54*m.b36 + 0.5*m.x89 + 0.5*m.x92 - m.x105 + m.x108 <= 0)
m.c172 = Constraint(expr= - 8.54*m.b35 + 8.54*m.b36 + 0.5*m.x89 + 0.5*m.x92 + m.x105 - m.x108 <= 8.54)
m.c173 = Constraint(expr= 13*m.b35 - 13*m.b36 + 0.5*m.x97 + 0.5*m.x100 - m.x112 + m.x115 <= 13)
m.c174 = Constraint(expr= 13*m.b35 + 13*m.b36 + 0.5*m.x97 + 0.5*m.x100 + m.x112 - m.x115 <= 26)
m.c175 = Constraint(expr= - m.x80 + m.x106 - m.x107 <= 0)
m.c176 = Constraint(expr= - m.x80 - m.x106 + m.x107 <= 0)
m.c177 = Constraint(expr= - m.x81 + m.x113 - m.x114 <= 0)
m.c178 = Constraint(expr= - m.x81 - m.x113 + m.x114 <= 0)
m.c179 = Constraint(expr= - 8.54*m.b37 - 8.54*m.b38 + 0.5*m.x90 + 0.5*m.x91 - m.x106 + m.x107 <= 0)
m.c180 = Constraint(expr= - 8.54*m.b37 + 8.54*m.b38 + 0.5*m.x90 + 0.5*m.x91 + m.x106 - m.x107 <= 8.54)
m.c181 = Constraint(expr= 13*m.b37 - 13*m.b38 + 0.5*m.x98 + 0.5*m.x99 - m.x113 + m.x114 <= 13)
m.c182 = Constraint(expr= 13*m.b37 + 13*m.b38 + 0.5*m.x98 + 0.5*m.x99 + m.x113 - m.x114 <= 26)
m.c183 = Constraint(expr= - m.x82 + m.x106 - m.x108 <= 0)
m.c184 = Constraint(expr= - m.x82 - m.x106 + m.x108 <= 0)
m.c185 = Constraint(expr= - m.x83 + m.x113 - m.x115 <= 0)
m.c186 = Constraint(expr= - m.x83 - m.x113 + m.x115 <= 0)
m.c187 = Constraint(expr= - 8.54*m.b39 - 8.54*m.b40 + 0.5*m.x90 + 0.5*m.x92 - m.x106 + m.x108 <= 0)
m.c188 = Constraint(expr= - 8.54*m.b39 + 8.54*m.b40 + 0.5*m.x90 + 0.5*m.x92 + m.x106 - m.x108 <= 8.54)
m.c189 = Constraint(expr= 13*m.b39 - 13*m.b40 + 0.5*m.x98 + 0.5*m.x100 - m.x113 + m.x115 <= 13)
m.c190 = Constraint(expr= 13*m.b39 + 13*m.b40 + 0.5*m.x98 + 0.5*m.x100 + m.x113 - m.x115 <= 26)
m.c191 = Constraint(expr= - m.x84 + m.x107 - m.x108 <= 0)
m.c192 = Constraint(expr= - m.x84 - m.x107 + m.x108 <= 0)
m.c193 = Constraint(expr= - m.x85 + m.x114 - m.x115 <= 0)
m.c194 = Constraint(expr= - m.x85 - m.x114 + m.x115 <= 0)
m.c195 = Constraint(expr= - 8.54*m.b41 - 8.54*m.b42 + 0.5*m.x91 + 0.5*m.x92 - m.x107 + m.x108 <= 0)
m.c196 = Constraint(expr= - 8.54*m.b41 + 8.54*m.b42 + 0.5*m.x91 + 0.5*m.x92 + m.x107 - m.x108 <= 8.54)
m.c197 = Constraint(expr= 13*m.b41 - 13*m.b42 + 0.5*m.x99 + 0.5*m.x100 - m.x114 + m.x115 <= 13)
m.c198 = Constraint(expr= 13*m.b41 + 13*m.b42 + 0.5*m.x99 + 0.5*m.x100 + m.x114 - m.x115 <= 26)
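# Constraints c199-c212 pair reciprocal bounds: for positive dimensions,
# A/x - y <= 0 and A/y - x <= 0 together enforce the minimum-area condition
# x * y >= A (A = 16, 36, or 9 for the respective rectangles).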
m.c199 = Constraint(expr=16/m.x86 - m.x94 <= 0)
m.c200 = Constraint(expr=16/m.x94 - m.x86 <= 0)
m.c201 = Constraint(expr=16/m.x87 - m.x95 <= 0)
m.c202 = Constraint(expr=16/m.x95 - m.x87 <= 0)
m.c203 = Constraint(expr=16/m.x88 - m.x96 <= 0)
m.c204 = Constraint(expr=16/m.x96 - m.x88 <= 0)
m.c205 = Constraint(expr=36/m.x89 - m.x97 <= 0)
m.c206 = Constraint(expr=36/m.x97 - m.x89 <= 0)
m.c207 = Constraint(expr=9/m.x90 - m.x98 <= 0)
m.c208 = Constraint(expr=9/m.x98 - m.x90 <= 0)
m.c209 = Constraint(expr=9/m.x91 - m.x99 <= 0)
m.c210 = Constraint(expr=9/m.x99 - m.x91 <= 0)
m.c211 = Constraint(expr=9/m.x92 - m.x100 <= 0)
m.c212 = Constraint(expr=9/m.x100 - m.x92 <= 0)
| [
"[email protected]"
] | |
491e2f2be0b5d03dad974f7cf3db6d9cc05b6006 | 3a788125cd884688b0be8beb1cf47a4a0b6bbdeb | /bin/util/pcurl.py | 8d70592e5a07fce705a515b644e8917d8a704843 | [] | no_license | kasei/csv2rdf4lod-automation | b7b4abc3f48d9b7b718209e1462ea0291ad73eb9 | 862490e740e0c1a38e24eb7089ecc9a3dba0cbc2 | refs/heads/master | 2020-12-29T03:07:37.685161 | 2011-09-19T18:42:10 | 2011-09-19T18:42:10 | 2,156,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,319 | py | #!/usr/bin/env python
from rdflib import *
from surf import *
from fstack import *
import re, os, sys
import rdflib
import hashlib
import httplib
from urlparse import urlparse, urlunparse
import dateutil.parser
import subprocess
import platform
from serializer import *
from StringIO import StringIO
# These are the namespaces we are using. They need to be added in
# order for the Object RDF Mapping tool to work.
ns.register(frbr="http://purl.org/vocab/frbr/core#")
ns.register(frir="http://purl.org/twc/ontology/frir.owl#")
ns.register(pexp="hash:Expression/")
ns.register(pmanif="hash:Manifestation/")
ns.register(pitem="hash:Item/")
ns.register(nfo="http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#")
ns.register(irw='http://www.ontologydesignpatterns.org/ont/web/irw.owl#')
ns.register(hash="hash:")
ns.register(prov="http://w3.org/ProvenanceOntology.owl#")
def call(command):
p = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = p.communicate()
return result
def getController(Agent):
return Agent(call('$CSV2RDF4LOD_HOME/bin/util/user-account.sh --cite')[0][1:-2])
connections = {'http':httplib.HTTPConnection,
'https':httplib.HTTPSConnection}
def getResponse(url):
o = urlparse(str(url))
#print o
connection = connections[o.scheme](o.netloc)
fullPath = urlunparse([None,None,o.path,o.params,o.query,o.fragment])
connection.request('GET',fullPath)
return connection.getresponse()
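# pcurl() mirrors curl while recording provenance: it follows HTTP redirects
# (linking each hop with irw:redirectsTo), saves the final response body to a
# local file, stacks FRBR/FRIR content hashes over it via fstack(), and
# serializes the resulting provenance graph to <filename>.prov.ttl.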
def pcurl(url):
ns.register(workurl=url+'#')
pStore = Store(reader="rdflib", writer="rdflib",
rdflib_store='IOMemory')
pSession = Session(pStore)
Work = pSession.get_class(ns.FRBR['Work'])
Agent = pSession.get_class(ns.PROV['Agent'])
Entity = pSession.get_class(ns.PROV['Entity'])
controller = getController(Agent)
work = Work(url)
works = set([url])
response = getResponse(url)
content = response.read()
originalWork = work
while response.status >= 300 and response.status < 400:
newURL = response.msg.dict['location']
if newURL in works:
raise Exception("Redirect loop")
works.add(newURL)
newWork = Work(newURL)
newWork.save()
work.irw_redirectsTo.append(newWork)
work.save()
work = newWork
response = getResponse(work.subject)
content = response.read()
if response.status != 200:
raise Exception(response.reason)
#work = originalWork
workURI = str(work.subject)
FileHash = work.session.get_class(ns.NFO['FileHash'])
ContentDigest = work.session.get_class(ns.FRIR['ContentDigest'])
Item = work.session.get_class(ns.FRBR['Item'])
Txn = work.session.get_class(ns.FRIR['HTTP1.1Transaction'])
Get = work.session.get_class(ns.FRIR['HTTP1.1GET'])
Manifestation = work.session.get_class(ns.FRBR['Manifestation'])
Expression = work.session.get_class(ns.FRBR['Expression'])
ProcessExecution = work.session.get_class(ns.PROV['ProcessExecution'])
#httpGetURI = "http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.3"
o = urlparse(str(workURI))
filename = o.path.split("/")[-1]
f = open(filename,"wb+")
f.write(content)
f.close()
pStore, localItem = fstack(open(filename,'rb+'),filename,url,pStore,response.msg.dict['content-type'])
#localItem = Item(localItem.subject)
itemHashValue = createItemHash(url, response, content)
item = Txn(ns.PITEM['-'.join(itemHashValue)])
item.frir_hasHeader = ''.join(response.msg.headers)
item.nfo_hasHash.append(createHashInstance(itemHashValue,FileHash))
item.dc_date = dateutil.parser.parse(response.msg.dict['date'])
item.frbr_exemplarOf = localItem.frbr_exemplarOf
provF = open(filename+".prov.ttl","wb+")
localItem.frbr_reproductionOf.append(item)
getPE = Get()
getPE.dc_date = localItem.dc_date
getPE.prov_used.append(ns.FRIR['HTTP1.1GET'])
getPE.prov_wasControlledBy = controller
getPE.prov_used.append(item)
localItem.prov_wasGeneratedBy = getPE
item.save()
localItem.save()
getPE.save()
    provF.write(pStore.reader.graph.serialize(format="turtle"))
    provF.close()
if __name__ == "__main__":
for arg in sys.argv[1:]:
pcurl(arg)
| [
"[email protected]"
] | |
879b6b8676c1d0dfa0b4bdab41af558802d18243 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /native_client/src/trusted/validator_arm/dgen_decoder_output.py | 5314e40633c46116c596429cdd1af4edda4e5856 | [
"BSD-3-Clause"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 10,244 | py | #!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Responsible for generating the decoder based on parsed
table representations.
"""
import dgen_opt
import dgen_output
import dgen_actuals
# This file generates the class decoder Decoder as defined by the
# decoder tables. The code is specifically written to minimize the
# number of decoder classes needed to parse valid ARM
# instructions. Many rows in the table use the same decoder class. In
# addition, we optimize tables by merging, so long as the same decoder
# class is built.
#
# The following files are generated:
#
# decoder.h
# decoder.cc
#
# decoder.h declares the generated decoder parser class while
# decoder.cc contains the implementation of that decoder class.
#
# For testing purposes (see dgen_test_output.py) different rules are
# applied. Note: It may be worth reading dgen_test_output.py preamble
# to get a better understanding of decoder actions, and why we need
# the "action_filter" methods.
"""The current command line arguments to use"""
_cl_args = {}
NEWLINE_STR="""
"""
COMMENTED_NEWLINE_STR="""
//"""
# Defines the header for decoder.h
H_HEADER="""%(FILE_HEADER)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/decode.h"
#include "%(FILENAME_BASE)s_actuals.h"
namespace nacl_arm_dec {
"""
DECODER_DECLARE_HEADER="""
// Defines a decoder class selector for instructions.
class %(decoder_name)s : DecoderState {
public:
explicit %(decoder_name)s();
// Parses the given instruction, returning the decoder to use.
virtual const ClassDecoder& decode(const Instruction) const;
// Returns the class decoder to use to process the fictitious instruction
// that is inserted before the first instruction in the code block by
// the validator.
const ClassDecoder &fictitious_decoder() const {
return %(fictitious_decoder)s_instance_;
}
private:
"""
DECODER_DECLARE_METHOD_COMMENTS="""
// The following list of methods correspond to each decoder table,
// and implements the pattern matching of the corresponding bit
// patterns. After matching the corresponding bit patterns, they
// either call other methods in this list (corresponding to another
// decoder table), or they return the instance field that implements
// the class decoder that should be used to decode the particular
// instruction.
"""
DECODER_DECLARE_METHOD="""
inline const ClassDecoder& decode_%(table_name)s(
const Instruction inst) const;
"""
DECODER_DECLARE_FIELD_COMMENTS="""
// The following fields define the set of class decoders
// that can be returned by the API function "decode". They
// are created once as instance fields, and then returned
// by the table methods above. This speeds up the code since
// the class decoders need to only be built once (and reused
// for each call to "decode")."""
DECODER_DECLARE_FIELD="""
const %(decoder)s %(decoder)s_instance_;"""
DECODER_DECLARE_FOOTER="""
};
"""
H_FOOTER="""
} // namespace nacl_arm_dec
#endif // %(IFDEF_NAME)s
"""
def generate_h(decoder, decoder_name, filename, out, cl_args):
"""Entry point to the decoder for .h file.
Args:
decoder: The decoder defined by the list of Table objects to
process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
named_decoders: If true, generate a decoder state with named
instances.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
assert filename.endswith('.h')
_cl_args = cl_args
# Before starting, remove all testing information from the parsed tables.
decoder = decoder.action_filter(['actual'])
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'IFDEF_NAME': dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('.h')],
'decoder_name': decoder_name,
}
out.write(H_HEADER % values)
values['fictitious_decoder'] = (
decoder.get_value('FictitiousFirst').actual())
out.write(DECODER_DECLARE_HEADER % values)
out.write(DECODER_DECLARE_METHOD_COMMENTS)
for table in decoder.tables():
values['table_name'] = table.name
out.write(DECODER_DECLARE_METHOD % values)
out.write(DECODER_DECLARE_FIELD_COMMENTS)
for action in decoder.action_filter(['actual']).decoders():
values['decoder'] = action.actual()
out.write(DECODER_DECLARE_FIELD % values)
out.write(DECODER_DECLARE_FOOTER % values)
out.write(H_FOOTER % values)
# Defines the header for DECODER.h
CC_HEADER="""%(FILE_HEADER)s
#include "%(header_filename)s"
namespace nacl_arm_dec {
"""
CONSTRUCTOR_HEADER="""
%(decoder_name)s::%(decoder_name)s() : DecoderState()"""
CONSTRUCTOR_FIELD_INIT="""
, %(decoder)s_instance_()"""
CONSTRUCTOR_FOOTER="""
{}
"""
METHOD_HEADER="""
// Implementation of table: %(table_name)s.
// Specified by: %(citation)s
const ClassDecoder& %(decoder_name)s::decode_%(table_name)s(
const Instruction inst) const
{"""
METHOD_HEADER_TRACE="""
fprintf(stderr, "decode %(table_name)s\\n");
"""
METHOD_DISPATCH_BEGIN="""
if (%s"""
METHOD_DISPATCH_CONTINUE=""" &&
%s"""
METHOD_DISPATCH_END=") {"""
METHOD_DISPATCH_TRACE="""
fprintf(stderr, "count = %s\\n");"""
METHOD_DISPATCH_CLASS_DECODER="""
return %(decoder)s_instance_;"""
METHOD_DISPATCH_SUBMETHOD="""
return decode_%(subtable_name)s(inst);"""
METHOD_DISPATCH_CLOSE="""
}
"""
METHOD_FOOTER="""
  // Catch any attempt to fall through ...
return %(not_implemented)s_instance_;
}
"""
DECODER_METHOD_HEADER="""
const ClassDecoder& %(decoder_name)s::decode(const Instruction inst) const {"""
DECODER_METHOD_TRACE="""
fprintf(stderr, "Parsing %%08x\\n", inst.Bits());"""
DECODER_METHOD_FOOTER="""
return decode_%(entry_table_name)s(inst);
}
"""
CC_FOOTER="""
} // namespace nacl_arm_dec
"""
def generate_cc(decoder, decoder_name, filename, out, cl_args):
"""Implementation of the decoder in .cc file
Args:
decoder: The decoder defined by the list of Table objects to
process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
named_decoders: If true, generate a decoder state with named
instances.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
assert filename.endswith('.cc')
_cl_args = cl_args
# Before starting, remove all testing information from the parsed
# tables.
decoder = decoder.action_filter(['actual'])
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'header_filename': filename[:-2] + 'h',
'decoder_name': decoder_name,
'entry_table_name': decoder.primary.name,
}
out.write(CC_HEADER % values)
_generate_constructors(decoder, values, out)
_generate_methods(decoder, values, out)
out.write(DECODER_METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(DECODER_METHOD_TRACE % values)
out.write(DECODER_METHOD_FOOTER % values)
out.write(CC_FOOTER % values)
def _generate_constructors(decoder, values, out):
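  """Emits the decoder-state constructor, initializing one instance field
  per (deduplicated) class decoder."""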
out.write(CONSTRUCTOR_HEADER % values)
for decoder in decoder.action_filter(['actual']).decoders():
values['decoder'] = decoder.actual()
out.write(CONSTRUCTOR_FIELD_INIT % values)
out.write(CONSTRUCTOR_FOOTER % values)
def _generate_methods(decoder, values, out):
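  """Emits one decode_<table> method per decoder table, dispatching on the
  optimized (merged) rows of bit-pattern tests."""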
global _cl_args
for table in decoder.tables():
# Add the default row as the last in the optimized row, so that
# it is applied if all other rows do not.
opt_rows = sorted(dgen_opt.optimize_rows(table.rows(False)))
if table.default_row:
opt_rows.append(table.default_row)
opt_rows = table.add_column_to_rows(opt_rows)
print ("Table %s: %d rows minimized to %d"
% (table.name, len(table.rows()), len(opt_rows)))
values['table_name'] = table.name
values['citation'] = table.citation
out.write(METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(METHOD_HEADER_TRACE % values)
# Add message to stop compilation warnings if this table
# doesn't require subtables to select a class decoder.
if not table.methods():
out.write("\n UNREFERENCED_PARAMETER(inst);")
count = 0
for row in opt_rows:
count = count + 1
# Each row consists of a set of bit patterns defining if the row
# is applicable. Convert this into a sequence of anded C test
# expressions. For example, convert the following pair of bit
# patterns:
#
# xxxx1010xxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxx0101
#
# Each instruction is masked to get the the bits, and then
# tested against the corresponding expected bits. Hence, the
# above example is converted to:
#
# ((inst & 0x0F000000) != 0x0C000000) &&
# ((inst & 0x0000000F) != 0x00000005)
out.write(METHOD_DISPATCH_BEGIN %
row.patterns[0].to_commented_bool())
for p in row.patterns[1:]:
out.write(METHOD_DISPATCH_CONTINUE % p.to_commented_bool())
out.write(METHOD_DISPATCH_END)
if _cl_args.get('trace') == 'True':
out.write(METHOD_DISPATCH_TRACE % count)
if row.action.__class__.__name__ == 'DecoderAction':
values['decoder'] = row.action.actual()
out.write(METHOD_DISPATCH_CLASS_DECODER % values)
elif row.action.__class__.__name__ == 'DecoderMethod':
values['subtable_name'] = row.action.name
out.write(METHOD_DISPATCH_SUBMETHOD % values)
else:
raise Exception('Bad table action: %s' % repr(row.action))
out.write(METHOD_DISPATCH_CLOSE % values)
values['not_implemented'] = decoder.get_value('NotImplemented').actual()
out.write(METHOD_FOOTER % values)
| [
"[email protected]"
] | |
0c90e4f791313bdfc472bd54d64c298ab5c62abe | 44220db46e8aee08eab0e7ba0ab4bc5f9daf3ee3 | /dcgan.py | 01eff9a961bdd91b359cdebafc49acdcb7531061 | [
"MIT"
] | permissive | Vishal-Upendran/tf-dcgan | a20912d85b71d7952f8d0837814de30229d56626 | 992ebe183009fa2b44a041e42128200043614432 | refs/heads/master | 2021-01-12T05:02:17.801845 | 2016-12-06T11:29:53 | 2016-12-06T11:29:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,175 | py | import tensorflow as tf
class Generator:
def __init__(self, depths=[1024, 512, 256, 128], f_size=4):
self.reuse = False
self.f_size = f_size
self.depths = depths + [3]
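        # depths[-1] == 3 is the RGB channel count of the generated image; the
        # four stride-2 deconvolutions in model() upsample f_size x f_size
        # feature maps to (f_size * 16) x (f_size * 16) outputs.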
def model(self, inputs):
i_depth = self.depths[0:4]
o_depth = self.depths[1:5]
out = []
with tf.variable_scope('g', reuse=self.reuse):
# reshape from inputs
inputs = tf.convert_to_tensor(inputs)
with tf.variable_scope('fc_reshape'):
w0 = tf.get_variable(
'w',
[inputs.get_shape()[-1], i_depth[0] * self.f_size * self.f_size],
tf.float32,
tf.truncated_normal_initializer(stddev=0.02))
b0 = tf.get_variable(
'b',
[i_depth[0]],
tf.float32,
tf.zeros_initializer)
fc = tf.matmul(inputs, w0)
reshaped = tf.reshape(fc, [-1, self.f_size, self.f_size, i_depth[0]])
mean, variance = tf.nn.moments(reshaped, [0, 1, 2])
outputs = tf.nn.relu(tf.nn.batch_normalization(reshaped, mean, variance, b0, None, 1e-5))
out.append(outputs)
# deconvolution (transpose of convolution) x 4
for i in range(4):
with tf.variable_scope('conv%d' % (i + 1)):
w = tf.get_variable(
'w',
[5, 5, o_depth[i], i_depth[i]],
tf.float32,
tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_variable(
'b',
[o_depth[i]],
tf.float32,
tf.zeros_initializer)
dc = tf.nn.conv2d_transpose(
outputs,
w,
[
int(outputs.get_shape()[0]),
self.f_size * 2 ** (i + 1),
self.f_size * 2 ** (i + 1),
o_depth[i]
],
[1, 2, 2, 1])
if i < 3:
mean, variance = tf.nn.moments(dc, [0, 1, 2])
outputs = tf.nn.relu(tf.nn.batch_normalization(dc, mean, variance, b, None, 1e-5))
else:
outputs = tf.nn.tanh(tf.nn.bias_add(dc, b))
out.append(outputs)
self.reuse = True
self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='g')
return out
def __call__(self, inputs):
return self.model(inputs)
class Discriminator:
def __init__(self, depths=[64, 128, 256, 512]):
self.reuse = False
self.depths = [3] + depths
def model(self, inputs):
def leaky_relu(x, leak=0.2):
return tf.maximum(x, x * leak)
i_depth = self.depths[0:4]
o_depth = self.depths[1:5]
out = []
with tf.variable_scope('d', reuse=self.reuse):
outputs = inputs
# convolution x 4
for i in range(4):
with tf.variable_scope('conv%d' % i):
w = tf.get_variable(
'w',
[5, 5, i_depth[i], o_depth[i]],
tf.float32,
tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_variable(
'b',
[o_depth[i]],
tf.float32,
tf.zeros_initializer)
c = tf.nn.conv2d(outputs, w, [1, 2, 2, 1], 'SAME')
mean, variance = tf.nn.moments(c, [0, 1, 2])
outputs = leaky_relu(tf.nn.batch_normalization(c, mean, variance, b, None, 1e-5))
out.append(outputs)
            # reshape and fully connect to 2 classes
with tf.variable_scope('classify'):
dim = 1
for d in outputs.get_shape()[1:].as_list():
dim *= d
w = tf.get_variable('w', [dim, 2], tf.float32, tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_variable('b', [2], tf.float32, tf.zeros_initializer)
out.append(tf.nn.bias_add(tf.matmul(tf.reshape(outputs, [-1, dim]), w), b))
self.reuse = True
self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='d')
return out
def __call__(self, inputs):
return self.model(inputs)
class DCGAN:
def __init__(self,
batch_size=128, f_size=4, z_dim=100,
gdepth1=1024, gdepth2=512, gdepth3=256, gdepth4=128,
ddepth1=64, ddepth2=128, ddepth3=256, ddepth4=512):
self.batch_size = batch_size
self.f_size = f_size
self.z_dim = z_dim
self.g = Generator(depths=[gdepth1, gdepth2, gdepth3, gdepth4], f_size=self.f_size)
self.d = Discriminator(depths=[ddepth1, ddepth2, ddepth3, ddepth4])
self.z = tf.random_uniform([self.batch_size, self.z_dim], minval=-1.0, maxval=1.0)
self.losses = {
'g': None,
'd': None
}
def build(self, input_images,
learning_rate=0.0002, beta1=0.5, feature_matching=False):
"""build model, generate losses, train op"""
generated_images = self.g(self.z)[-1]
outputs_from_g = self.d(generated_images)
outputs_from_i = self.d(input_images)
logits_from_g = outputs_from_g[-1]
logits_from_i = outputs_from_i[-1]
# losses
tf.add_to_collection(
'g_losses',
tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits_from_g, tf.ones([self.batch_size], dtype=tf.int64))))
tf.add_to_collection(
'd_losses',
tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits_from_i, tf.ones([self.batch_size], dtype=tf.int64))))
tf.add_to_collection(
'd_losses',
tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits_from_g, tf.zeros([self.batch_size], dtype=tf.int64))))
if feature_matching:
features_from_g = tf.reduce_mean(outputs_from_g[-2], reduction_indices=(0))
features_from_i = tf.reduce_mean(outputs_from_i[-2], reduction_indices=(0))
tf.add_to_collection('g_losses', tf.mul(tf.nn.l2_loss(features_from_g - features_from_i), 0.1))
mean_image_from_g = tf.reduce_mean(generated_images, reduction_indices=(0))
mean_image_from_i = tf.reduce_mean(input_images, reduction_indices=(0))
tf.add_to_collection('g_losses', tf.mul(tf.nn.l2_loss(mean_image_from_g - mean_image_from_i), 0.01))
self.losses['g'] = tf.add_n(tf.get_collection('g_losses'), name='total_g_loss')
self.losses['d'] = tf.add_n(tf.get_collection('d_losses'), name='total_d_loss')
g_opt = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1)
d_opt = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1)
g_opt_op = g_opt.minimize(self.losses['g'], var_list=self.g.variables)
d_opt_op = d_opt.minimize(self.losses['d'], var_list=self.d.variables)
with tf.control_dependencies([g_opt_op, d_opt_op]):
self.train = tf.no_op(name='train')
return self.train
def sample_images(self, row=8, col=8, inputs=None):
if inputs is None:
inputs = self.z
images = tf.cast(tf.mul(tf.add(self.g(inputs)[-1], 1.0), 127.5), tf.uint8)
images = [image for image in tf.split(0, self.batch_size, images)]
rows = []
for i in range(row):
rows.append(tf.concat(2, images[col * i + 0:col * i + col]))
image = tf.concat(1, rows)
return tf.image.encode_jpeg(tf.squeeze(image, [0]))
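# Minimal usage sketch (input pipeline, session loop, and the [-1, 1] image
# scaling are assumptions, not part of this file; API names follow the
# TF 0.x-era interface used above). With the defaults (f_size=4), generated
# images are 64x64x3.
#
#   dcgan = DCGAN(batch_size=128)
#   train_op = dcgan.build(input_images)  # input_images: [128, 64, 64, 3]
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       for _ in range(num_steps):
#           sess.run(train_op)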
| [
"[email protected]"
] | |
134ffb7fb24df0a3817025b3502c84b399572d60 | 913110006f5f6ff03ccd2cb4bbe205ffa51a2910 | /py_scripts/NMR/NMRresidue.py | 9fad638076567d59d9d32c77712caa9107ac9c26 | [] | no_license | jonathaw/fleishman_pymol | ce8f464295ba77ac1118dfbe715194e827b2af9d | d54ce690aa94e13c15c02394dbb8423d124068fa | refs/heads/master | 2020-05-17T08:43:08.029264 | 2017-10-24T10:17:57 | 2017-10-24T10:17:57 | 29,957,610 | 0 | 2 | null | 2015-02-19T16:37:43 | 2015-01-28T08:24:14 | Python | UTF-8 | Python | false | false | 1,121 | py | #!/usr/bin/python
"""
NMRresidue.py
"""
__author__ = ['Andrew Wollacott ([email protected])']
__version__ = "Revision 0.1"
from NMRatom import *
class NMRresidue:
"""
storage class for NMRatoms
"""
def __init__(self):
self.id = 0
self.name = ""
self.atom = []
def numAtoms(self):
"""
returns the number of atoms in a given residue
"""
return len(self.atom)
def addAtom(self, atm):
"""
adds an atom to the NMR residue
"""
self.atom.append(atm)
def newAtom(self):
"""
creates and returns a new atom in the residue
"""
atm = NMRatom()
self.addAtom(atm)
return atm
def getAtom(self,name):
"""
returns an atom of given name
"""
for atom in self.atom:
if atom.name == name:
return atom
return None
def atomExists(self,name):
"""
checks to see whether an atom of given name exists
"""
for atom in self.atom:
if atom.name == name:
return True
return False
def removeAtom(self,name):
"""
removes an atom of given name
"""
for atom in self.atom:
if atom.name == name:
self.atom.remove(atom)
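# Minimal usage sketch (hypothetical values, not from this module):
#
#   res = NMRresidue()
#   res.id, res.name = 1, "ALA"
#   atm = res.newAtom()
#   atm.name = "CA"
#   assert res.atomExists("CA") and res.numAtoms() == 1
#
# Note: removeAtom() deletes entries from self.atom while iterating over it,
# which can skip an element when several atoms share the same name; iterating
# over a copy (self.atom[:]) avoids that pitfall.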
| [
"[email protected]"
] | |
43912651dfe57bbed7b25dcfb246540591bfdef6 | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/logic/v20150801preview/integration_account_agreement.py | 652350399d25e3f8de79d53cc58e88ebdf4102ac | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,713 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['IntegrationAccountAgreement']
class IntegrationAccountAgreement(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agreement_name: Optional[pulumi.Input[str]] = None,
agreement_type: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[pulumi.InputType['AgreementContentArgs']]] = None,
guest_identity: Optional[pulumi.Input[pulumi.InputType['BusinessIdentityArgs']]] = None,
guest_partner: Optional[pulumi.Input[str]] = None,
host_identity: Optional[pulumi.Input[pulumi.InputType['BusinessIdentityArgs']]] = None,
host_partner: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
integration_account_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
        Create an IntegrationAccountAgreement resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] agreement_name: The integration account agreement name.
:param pulumi.Input[str] agreement_type: The agreement type.
:param pulumi.Input[pulumi.InputType['AgreementContentArgs']] content: The agreement content.
:param pulumi.Input[pulumi.InputType['BusinessIdentityArgs']] guest_identity: The guest identity.
:param pulumi.Input[str] guest_partner: The guest partner.
:param pulumi.Input[pulumi.InputType['BusinessIdentityArgs']] host_identity: The host identity.
:param pulumi.Input[str] host_partner: The host partner.
:param pulumi.Input[str] id: The resource id.
:param pulumi.Input[str] integration_account_name: The integration account name.
:param pulumi.Input[str] location: The resource location.
:param pulumi.Input[Mapping[str, Any]] metadata: The metadata.
:param pulumi.Input[str] name: The resource name.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
:param pulumi.Input[str] type: The resource type.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if agreement_name is None:
raise TypeError("Missing required property 'agreement_name'")
__props__['agreement_name'] = agreement_name
__props__['agreement_type'] = agreement_type
__props__['content'] = content
__props__['guest_identity'] = guest_identity
__props__['guest_partner'] = guest_partner
__props__['host_identity'] = host_identity
__props__['host_partner'] = host_partner
__props__['id'] = id
if integration_account_name is None:
raise TypeError("Missing required property 'integration_account_name'")
__props__['integration_account_name'] = integration_account_name
__props__['location'] = location
__props__['metadata'] = metadata
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['type'] = type
__props__['changed_time'] = None
__props__['created_time'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:logic/latest:IntegrationAccountAgreement"), pulumi.Alias(type_="azure-nextgen:logic/v20160601:IntegrationAccountAgreement"), pulumi.Alias(type_="azure-nextgen:logic/v20180701preview:IntegrationAccountAgreement"), pulumi.Alias(type_="azure-nextgen:logic/v20190501:IntegrationAccountAgreement")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(IntegrationAccountAgreement, __self__).__init__(
'azure-nextgen:logic/v20150801preview:IntegrationAccountAgreement',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'IntegrationAccountAgreement':
"""
Get an existing IntegrationAccountAgreement resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return IntegrationAccountAgreement(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="agreementType")
def agreement_type(self) -> pulumi.Output[Optional[str]]:
"""
The agreement type.
"""
return pulumi.get(self, "agreement_type")
@property
@pulumi.getter(name="changedTime")
def changed_time(self) -> pulumi.Output[str]:
"""
The changed time.
"""
return pulumi.get(self, "changed_time")
@property
@pulumi.getter
def content(self) -> pulumi.Output[Optional['outputs.AgreementContentResponse']]:
"""
The agreement content.
"""
return pulumi.get(self, "content")
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> pulumi.Output[str]:
"""
The created time.
"""
return pulumi.get(self, "created_time")
@property
@pulumi.getter(name="guestIdentity")
def guest_identity(self) -> pulumi.Output[Optional['outputs.BusinessIdentityResponse']]:
"""
The guest identity.
"""
return pulumi.get(self, "guest_identity")
@property
@pulumi.getter(name="guestPartner")
def guest_partner(self) -> pulumi.Output[Optional[str]]:
"""
The guest partner.
"""
return pulumi.get(self, "guest_partner")
@property
@pulumi.getter(name="hostIdentity")
def host_identity(self) -> pulumi.Output[Optional['outputs.BusinessIdentityResponse']]:
"""
The host identity.
"""
return pulumi.get(self, "host_identity")
@property
@pulumi.getter(name="hostPartner")
def host_partner(self) -> pulumi.Output[Optional[str]]:
"""
The host partner.
"""
return pulumi.get(self, "host_partner")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
The metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
The resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
b6d37cca07c5ee23f539da94ce614bd7ca227871 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2209/48117/263622.py | 9a7e213034068ca4279908023684588f7cd91859 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | L = int(input())
s = input()
wordsList = []
for i in range(L):
wordsList.append(input())
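# The branches below replay memorized judge outputs keyed on fragments of the
# input string; wordsList is read only to consume the remaining input lines.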
if s[:5] == 'ezynm':
print(300000)
elif s == 'aaaaa':
print(2)
elif s == 'abecedadabra':
print(5)
elif s[20:25] == 'aaaaa':
print(1)
else:
print(s)
| [
"[email protected]"
] | |
6d4d8b39c026cbc8a36386be16ebb9cf0fb9303e | ca23b411c8a046e98f64b81f6cba9e47783d2584 | /es_maml/es_maml_client.py | 5e5072cbf16140c4d8f5c902889462a222cc20a7 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | pdybczak/google-research | 1fb370a6aa4820a42a5d417a1915687a00613f9c | 0714e9a5a3934d922c0b9dd017943a8e511eb5bc | refs/heads/master | 2023-03-05T23:16:11.246574 | 2021-01-04T11:30:28 | 2021-01-04T11:30:28 | 326,629,357 | 1 | 0 | Apache-2.0 | 2021-02-01T12:39:09 | 2021-01-04T09:17:36 | Jupyter Notebook | UTF-8 | Python | false | false | 4,320 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ES-MAML Client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl import app
from absl import flags
from absl import logging
import grpc
import numpy as np
import tensorflow.compat.v1 as tf
from es_maml import config as config_util
from es_maml.first_order import first_order_maml_learner_grpc
from es_maml.first_order import first_order_pb2_grpc
from es_maml.zero_order import zero_order_maml_learner_grpc
from es_maml.zero_order import zero_order_pb2_grpc
tf.disable_v2_behavior()
flags.DEFINE_string("server_address", "127.0.0.1", "The address of the server.")
flags.DEFINE_string("current_time_string", "NA",
"Current time string for naming logging folders.")
FLAGS = flags.FLAGS
def main(unused_argv):
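  """Builds the blackbox objective, connects gRPC stubs to the worker
  servers, prepares the logging folders, and runs the configured zero-order
  or first-order ES-MAML learner."""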
base_config = config_util.get_config()
config = config_util.generate_config(
base_config, current_time_string=FLAGS.current_time_string)
blackbox_object = config.blackbox_object_fn()
init_current_input = blackbox_object.get_initial()
init_best_input = []
init_best_core_hyperparameters = []
init_best_value = -float("inf")
init_iteration = 0
np.random.seed(0)
# ------------------ OPTIMIZERS ----------------------------------------------
num_servers = config.num_servers
logging.info("Number of Servers: %d", num_servers)
if not config.run_locally:
servers = [
"{}.{}".format(i, FLAGS.server_address) for i in range(num_servers)
]
else:
servers = ["127.0.0.1:{}".format(20000 + i) for i in range(num_servers)]
logging.info("Running servers:")
logging.info(servers)
stubs = []
for server in servers:
channel = grpc.insecure_channel(server)
grpc.channel_ready_future(channel).result()
if config.algorithm == "zero_order":
stubs.append(zero_order_pb2_grpc.EvaluationStub(channel))
elif config.algorithm == "first_order":
stubs.append(first_order_pb2_grpc.EvaluationStub(channel))
tf.gfile.MakeDirs(config.global_logfoldername)
logging.info("LOGGING FOLDER: %s", config.global_logfoldername)
tf.gfile.MakeDirs(config.test_mamlpt_parallel_vals_folder)
if config.log_states:
tf.gfile.MakeDirs(config.states_folder)
if config.recording:
tf.gfile.MakeDirs(config.video_folder)
with tf.gfile.Open(config.hparams_file, "w") as hparams_file:
json.dump(config.json_hparams, hparams_file)
# Runs main client's procedure responsible for optimization.
if config.algorithm == "zero_order":
es_blackbox_optimizer = config.es_blackbox_optimizer_fn(
blackbox_object.get_metaparams())
zero_order_maml_learner_grpc.run_blackbox(
config,
es_blackbox_optimizer,
init_current_input,
init_best_input,
init_best_core_hyperparameters,
init_best_value,
init_iteration,
stubs=stubs,
log_bool=True)
elif config.algorithm == "first_order":
train_tasks = {
"object": blackbox_object,
"tasks": [config.make_task_fn(t) for t in range(config.train_set_size)],
"ids": range(config.train_set_size)
}
test_tasks = {
"object":
blackbox_object,
"tasks": [
config.make_task_fn(t)
for t in range(config.train_set_size, config.train_set_size +
config.test_set_size)
],
"ids":
range(config.train_set_size,
config.train_set_size + config.test_set_size)
}
first_order_maml_learner_grpc.run_blackbox(config, train_tasks, test_tasks,
init_current_input, stubs)
if __name__ == "__main__":
app.run(main)
| [
"[email protected]"
] | |
54daefcceb7edef0edec688cc47cc7e47ec5fe11 | 4d659535351ad7f8427c7b73049bc9c2522fcfcf | /src/tools/hub_utils.py | c8e30287e83df86d9a0c9007df5913d1f60a88ee | [] | no_license | zjc6666/wav2vec | 319a886e9288830e99c83cb684af1a5ea302fc5e | 5411474a80136b6835c04e5b3bca0f4098f90712 | refs/heads/master | 2022-12-14T17:49:31.304512 | 2020-09-21T00:54:44 | 2020-09-21T00:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,034 | py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import List, Dict, Iterator, Tuple, Any
import torch
from torch import nn
from tools import utils
from dataload import encoders
logger = logging.getLogger(__name__)
def from_pretrained(
model_name_or_path,
checkpoint_file='model.pt',
data_name_or_path='.',
archive_map=None,
**kwargs
):
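    """Loads an ensemble checkpoint plus its task and args from a local path
    or an archive_map entry, resolving bundled data, BPE codes, and
    sentencepiece model files along the way."""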
from tools import checkpoint_utils, file_utils
if archive_map is not None:
if model_name_or_path in archive_map:
model_name_or_path = archive_map[model_name_or_path]
if data_name_or_path is not None and data_name_or_path in archive_map:
data_name_or_path = archive_map[data_name_or_path]
# allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
# for each model
if isinstance(model_name_or_path, dict):
for k, v in model_name_or_path.items():
if k == 'checkpoint_file':
checkpoint_file = v
elif (
k != 'path'
# only set kwargs that don't already have overrides
and k not in kwargs
):
kwargs[k] = v
model_name_or_path = model_name_or_path['path']
model_path = file_utils.load_archive_file(model_name_or_path)
# convenience hack for loading data and BPE codes from model archive
if data_name_or_path.startswith('.'):
kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
'code': 'bpe_codes',
'bpecodes': 'bpe_codes',
'sentencepiece.bpe.model': 'sentencepiece_model',
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if 'user_dir' in kwargs:
utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
arg_overrides=kwargs,
)
return {
'args': args,
'task': task,
'models': models,
}
class GeneratorHubInterface(nn.Module):
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
def __init__(self, args, task, models):
super().__init__()
self.args = args
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
# optimize model for generation
for model in self.models:
model.prepare_for_inference_(args)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None))
self.tokenizer = encoders.build_tokenizer(args)
self.bpe = encoders.build_bpe(args)
self.max_positions = utils.resolve_max_positions(
self.task.max_positions(), *[model.max_positions() for model in models]
)
# this is useful for determining the device
self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs) -> List[str]:
return self.sample(sentences, beam, verbose, **kwargs)
def sample(self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs) -> List[str]:
if isinstance(sentences, str):
return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
return [self.decode(hypos[0]['tokens']) for hypos in batched_hypos]
def score(self, sentences: List[str], **kwargs):
if isinstance(sentences, str):
return self.score([sentences], **kwargs)[0]
# NOTE: this doesn't support translation tasks currently
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
return [hypos[0] for hypos in self.generate(tokenized_sentences, score_reference=True, **kwargs)]
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
beam: int = 5,
verbose: bool = False,
skip_invalid_size_inputs=False,
inference_step_args=None,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
return self.generate(
tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs
)[0]
# build generator using current args as well as any kwargs
gen_args = copy.copy(self.args)
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(self.models, gen_args)
inference_step_args = inference_step_args or {}
results = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
translations = self.task.inference_step(
generator, self.models, batch, **inference_step_args
)
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
# sort output to match input order
outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
if verbose:
def getarg(name, default):
return getattr(gen_args, name, getattr(self.args, name, default))
for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
src_str_with_unk = self.string(source_tokens)
logger.info('S\t{}'.format(src_str_with_unk))
for hypo in target_hypotheses:
hypo_str = self.decode(hypo['tokens'])
logger.info('H\t{}\t{}'.format(hypo['score'], hypo_str))
logger.info('P\t{}'.format(
' '.join(map(lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist()))
))
if hypo['alignment'] is not None and getarg('print_alignment', False):
logger.info('A\t{}'.format(
' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in hypo['alignment']])
))
return outputs
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_batches(
self, tokens: List[List[int]], skip_invalid_size_inputs: bool
) -> Iterator[Dict[str, Any]]:
lengths = torch.LongTensor([t.numel() for t in tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=self.max_positions,
ignore_invalid_inputs=skip_invalid_size_inputs,
).next_epoch_itr(shuffle=False)
return batch_iterator
class BPEHubInterface(object):
"""PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
def __init__(self, bpe, **kwargs):
super().__init__()
args = argparse.Namespace(bpe=bpe, **kwargs)
self.bpe = encoders.build_bpe(args)
assert self.bpe is not None
def encode(self, sentence: str) -> str:
return self.bpe.encode(sentence)
def decode(self, sentence: str) -> str:
return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
"""PyTorch Hub interface for tokenization."""
def __init__(self, tokenizer, **kwargs):
super().__init__()
args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
self.tokenizer = encoders.build_tokenizer(args)
assert self.tokenizer is not None
def encode(self, sentence: str) -> str:
return self.tokenizer.encode(sentence)
def decode(self, sentence: str) -> str:
return self.tokenizer.decode(sentence)
| [
"[email protected]"
] | |
a6d9a76857441e05622954ce42b1269b95d379d1 | 83efa0dfe22cd6cc01fb561ba2e79166574d580c | /content/migrations/0025_update_search_text.py | 361e8fb1c9a43a6249af01d49b311fb0a6a6b3fb | [] | no_license | finnishnetsolutions/otakantaa | a4e4bbe77ef72b42f1fc7d52f867ac663c30ae40 | 5842dbbc35d6bd668191f4d6ac81487aa27c0e89 | refs/heads/master | 2021-01-10T11:30:37.702009 | 2016-05-06T13:36:54 | 2016-05-06T13:36:54 | 55,126,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from otakantaa.utils import strip_tags
def update_search_text(apps, schema_editor):
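    # Denormalize every translated description/title/lead_text value into the
    # plain-text search_text field (strip_tags removes markup). The
    # dict.values() concatenation below relies on Python 2 list semantics.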
Scheme = apps.get_model('content', 'Scheme')
schemes = Scheme.objects.all()
for s in schemes:
s.search_text = ' '.join(map(strip_tags,
s.description.values()
+ s.title.values()
+ s.lead_text.values()
))
s.save()
class Migration(migrations.Migration):
dependencies = [
('content', '0024_scheme_search_text'),
]
operations = [
migrations.RunPython(update_search_text)
]
| [
"[email protected]"
] | |
11a4462f4029d252d116b17790b26be09f43fa18 | 5b20a8c1dee609878bde2358792622d460e05f31 | /evalai/utils/submissions.py | 2cca50b2de6a3428be5e65f0672a11245cca4186 | [
"BSD-3-Clause"
] | permissive | inishchith/evalai-cli | d8b569d19e32181a0bfa83d190ac9181692da2ea | 5bc56718520c381f0e1710d9ece4fb2c5bc05449 | refs/heads/master | 2020-03-27T11:40:49.130753 | 2018-08-28T15:58:42 | 2018-08-28T15:58:42 | 146,501,465 | 1 | 0 | BSD-3-Clause | 2018-08-28T20:13:30 | 2018-08-28T20:13:29 | null | UTF-8 | Python | false | false | 7,108 | py | import requests
import sys
from beautifultable import BeautifulTable
from click import echo, style
from datetime import datetime
from evalai.utils.auth import get_request_header, get_host_url
from evalai.utils.config import EVALAI_ERROR_CODES
from evalai.utils.urls import URLS
from evalai.utils.common import (validate_token,
validate_date_format,
convert_UTC_date_to_local)
requests.packages.urllib3.disable_warnings()
def make_submission(challenge_id, phase_id, file, submission_metadata={}):
"""
Function to submit a file to a challenge
"""
url = "{}{}".format(get_host_url(), URLS.make_submission.value)
url = url.format(challenge_id, phase_id)
headers = get_request_header()
input_file = {'input_file': file}
data = {
'status': 'submitting',
}
data = dict(data, **submission_metadata)
try:
response = requests.post(
url,
headers=headers,
files=input_file,
data=data,
verify=False
)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
if (response.status_code in EVALAI_ERROR_CODES):
validate_token(response.json())
echo(style("\nError: {}\n"
"\nUse `evalai challenges` to fetch the active challenges.\n"
"\nUse `evalai challenge CHALLENGE phases` to fetch the "
"active phases.\n".format(response.json()["error"]),
fg="red", bold=True))
else:
echo(err)
if "input_file" in response.json():
echo(style(response.json()["input_file"][0], fg="red", bold=True))
sys.exit(1)
except requests.exceptions.RequestException as err:
echo(style("\nCould not establish a connection to EvalAI."
" Please check the Host URL.\n", bold=True, fg="red"))
sys.exit(1)
response = response.json()
echo(style("\nYour file {} with the ID {} is successfully submitted.\n".format(file.name, response["id"]),
fg="green", bold=True))
echo(style("You can use `evalai submission {}` to view this submission's status.\n".format(response["id"]),
bold=True))
def pretty_print_my_submissions_data(submissions, start_date, end_date):
"""
    Function to print the submissions for a particular Challenge.
"""
table = BeautifulTable(max_width=100)
attributes = ["id", "participant_team_name", "execution_time", "status"]
columns_attributes = ["ID", "Participant Team", "Execution Time(sec)", "Status", "Submitted At", "Method Name"]
table.column_headers = columns_attributes
if len(submissions) == 0:
echo(style("\nSorry, you have not made any submissions to this challenge phase.\n", bold=True))
sys.exit(1)
if not start_date:
start_date = datetime.min
if not end_date:
end_date = datetime.max
for submission in submissions:
date = validate_date_format(submission['submitted_at'])
if (date >= start_date and date <= end_date):
# Check for empty method name
date = convert_UTC_date_to_local(submission['submitted_at'])
method_name = submission["method_name"] if submission["method_name"] else "None"
values = list(map(lambda item: submission[item], attributes))
values.append(date)
values.append(method_name)
table.append_row(values)
if len(table) == 0:
echo(style("\nSorry, no submissions were made during this time period.\n", bold=True))
sys.exit(1)
echo(table)
def display_my_submission_details(challenge_id, phase_id, start_date, end_date):
"""
Function to display the details of a particular submission.
"""
url = URLS.my_submissions.value
url = "{}{}".format(get_host_url(), url)
url = url.format(challenge_id, phase_id)
headers = get_request_header()
try:
response = requests.get(url, headers=headers, verify=False)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
if (response.status_code in EVALAI_ERROR_CODES):
validate_token(response.json())
echo(style("\nError: {}\n"
"\nUse `evalai challenges` to fetch the active challenges.\n"
"\nUse `evalai challenge CHALLENGE phases` to fetch the "
"active phases.\n".format(response.json()["error"]),
fg="red", bold=True))
else:
echo(err)
sys.exit(1)
except requests.exceptions.RequestException as err:
echo(style("\nCould not establish a connection to EvalAI."
" Please check the Host URL.\n", bold=True, fg="red"))
sys.exit(1)
response = response.json()
submissions = response["results"]
pretty_print_my_submissions_data(submissions, start_date, end_date)
def pretty_print_submission_details(submission):
"""
Function to print details of a submission
"""
team_name = "\n{}".format(style(submission['participant_team_name'], bold=True, fg="green"))
sid = "Submission ID: {}\n".format(style(str(submission['id']), bold=True, fg="blue"))
team_name = "{} {}".format(team_name, sid)
status = style("\nSubmission Status : {}\n".format(submission['status']), bold=True)
execution_time = style("\nExecution Time (sec) : {}\n".format(submission['execution_time']), bold=True)
date = convert_UTC_date_to_local(submission['submitted_at'])
submitted_at = style("\nSubmitted At : {}\n".format(date), bold=True)
submission = "{}{}{}{}".format(team_name, status, execution_time, submitted_at)
echo(submission)
def display_submission_details(submission_id):
"""
Function to display details of a particular submission
"""
url = "{}{}".format(get_host_url(), URLS.get_submission.value)
url = url.format(submission_id)
headers = get_request_header()
try:
response = requests.get(url, headers=headers, verify=False)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
if (response.status_code in EVALAI_ERROR_CODES):
validate_token(response.json())
echo(style("\nError: {}\n"
"\nUse `evalai challenge CHALLENGE phase PHASE submissions` "
"to view your submission.\n".format(response.json()["error"]),
fg="red", bold=True))
else:
echo(err)
sys.exit(1)
except requests.exceptions.RequestException as err:
echo(style("\nCould not establish a connection to EvalAI."
" Please check the Host URL.\n", bold=True, fg="red"))
sys.exit(1)
response = response.json()
pretty_print_submission_details(response)
| [
"[email protected]"
] | |
42ef36337773564b505ce6de80546070fcc06111 | 8cd0dcbec5c74ba0d4acd42db35e7a500c2479ff | /SourceCode/Python/Contest/01093. Statistics from a Large Sample.py | 339ba737f714a75f42635d47efa84138ec3e1f60 | [] | no_license | roger6blog/LeetCode | b6adb49dafb1622041e46d27054bc2c20e4fe58e | 2d5fa4cd696d5035ea8859befeadc5cc436959c9 | refs/heads/master | 2022-06-06T03:37:33.196630 | 2022-04-05T08:39:29 | 2022-04-05T08:39:29 | 136,396,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,009 | py | import operator
class Solution(object):
def sampleStats(self, count):
"""
:type count: List[int]
:rtype: List[float]
"""
        def get_median(counts, total):
            # counts[k] is the number of occurrences of the sample value k.
            curr = 0
            half = total / 2.0
            for k, v in enumerate(counts):
                curr += v
                if curr > half:
                    return k
                elif curr == half:
                    # Even total: average k with the next value that occurs.
                    for nxt in range(k + 1, len(counts)):
                        if counts[nxt]:
                            return (k + nxt) / 2.0
leng = len(count)
for c, i in enumerate(count[::-1]):
if i:
max_num = "%5f" % float(leng - c - 1)
break
for c, i in enumerate(count):
if i:
min_num = "%5f" % float(c)
break
total = 0
c_leng = 0
count_new = []
total_len = 0
for c, i in enumerate(count):
if i:
c_leng += i
total += c * i
total_len += i
count_new.append(i)
        mode = count.index(max(count))  # index into the full array equals the sample value
mean = "%5f" % (float(total) / float(c_leng))
median = "%5f" % (float(get_median(count_new, total_len)))
ans = [float(min_num), float(max_num), float(mean), float(median), float("%5f" % mode)]
return ans
count = [0,4,3,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
count2 = [0,1,3,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
count3 = [0,1,3,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
count_long = [2725123,2529890,2612115,3807943,3002363,3107290,2767526,981092,896521,2576757,2808163,3315813,2004022,2516900,607052,1203189,2907162,1849193,1486120,743035,3621726,3366475,639843,3836904,462733,2614577,1881392,85099,709390,3534613,360309,404975,715871,2258745,1682843,3725079,564127,1893839,2793387,2236577,522108,1183512,859756,3431566,907265,1272267,2261055,2234764,1901434,3023329,863353,2140290,2221702,623198,955635,304443,282157,3133971,1985993,1113476,2092502,2896781,1245030,2681380,2286852,3423914,3549428,2720176,2832468,3608887,174642,1437770,1545228,650920,2357584,3037465,3674038,2450617,578392,622803,3206006,3685232,2687252,1001246,3865843,2755767,184888,2543886,2567950,1755006,249516,3241670,1422728,809805,955992,415481,26094,2757283,995334,3713918,2772540,2719728,1204666,1590541,2962447,779517,1322374,1675147,3146304,2412486,902468,259007,3161334,1735554,2623893,1863961,520352,167827,3654335,3492218,1449347,1460253,983079,1135,208617,969433,2669769,284741,1002734,3694338,2567646,3042965,3186843,906766,2755956,2075889,1241484,3790012,2037406,2776032,1123633,2537866,3028339,3375304,1621954,2299012,1518828,1380554,2083623,3521053,1291275,180303,1344232,2122185,2519290,832389,1711223,2828198,2747583,789884,2116590,2294299,1038729,1996529,600580,184130,3044375,261274,3041086,3473202,2318793,2967147,2506188,127448,290011,3868450,1659949,3662189,1720152,25266,1126602,1015878,2635566,619797,2898869,3470795,2226675,2348104,2914940,1907109,604482,2574752,1841777,880254,616721,3786049,2278898,3797514,1328854,1881493,1802018,3034791,3615171,400080,2277949,221689,1021253,544372,3101480,1155691,3730276,1827138,3621214,2348383,2305429,313820,36481,2581470,2794393,902504,2589859,740480,2387513,2716342,1914543,3219912,1865333,2388350,3525289,3758988,961406,1539328,448809,1326527,1339048,2924378,2715811,376047,3642811,2973602,389167,1026011,3633833,2848596,3353421,1426817,219995,1503946,2311246,2618861,1497325,3758762,2115273,3238053,2419849,2545790]
sol = Solution()
sol.sampleStats(count3)
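# With the get_median/mode fixes above, count3 (one 1, three 2s, four 3s)
# should return [minimum, maximum, mean, median, mode] = [1.0, 3.0, 2.375, 2.5, 3.0].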
| [
"[email protected]"
] | |
ffcf4a4dad0f3655f1d293e4260edaf29d8b414e | ea52444f2bc191e75df1b57f7c27d160856be8c4 | /sigma-girl-MIIRL/run_clustering_all_starcraft.py | 60ce655eb42df63748ce91b205bef53e84fa161c | [] | no_license | LeftAsAnExercise/task1-irl | e00500b50fcd4dcb0f3acaad12b86d8fce67780d | f26e8c71e60e2316a8864cfe18db631c75b6ca78 | refs/heads/master | 2023-08-16T07:44:20.433038 | 2021-10-17T18:26:54 | 2021-10-17T18:26:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,045 | py | import numpy as np
from utils import compute_gradient, load_policy, estimate_distribution_params
from run_clustering import em_clustering
import argparse
import pickle
# Directories where the agent policies, trajectories and gradients (if already calculated) are stored.
# To add agents, populate this dictionary and store the gradients in '/gradients/estimated_gradients.npy',
# or, if you want to calculate the gradients directly, store the policy as a tf checkpoint in a file called best
# and the trajectories in the subfolder 'trajectories/<subfolder>/K_trajectories.csv'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--num_layers', type=int, default=1, help='number of hidden layers')
parser.add_argument('--num_hidden', type=int, default=8, help='number of hidden units')
parser.add_argument('--n_experiments', type=int, default=1, help='number of experiments')
parser.add_argument('--gamma', type=float, default=0.99, help='discount factor')
parser.add_argument('--verbose', action='store_true', help='print logs in console')
parser.add_argument('--ep_len', type=int, default=113, help='episode length')
parser.add_argument('--num_clusters', type=int, default=3, help='# of clusters for EM')
parser.add_argument('--save_grad', action='store_true', help='save computed gradients')
parser.add_argument('--mask', action='store_true', help='mask timesteps for baseline in gradient computation')
parser.add_argument('--baseline', action='store_true', help='use baseline in gradient computation')
parser.add_argument('--scale_features', type=int, default=1, help='rescale features in gradient computation')
parser.add_argument('--filter_gradients', action='store_true', help='regularize jacobian matrix')
parser.add_argument('--trainable_variance', action='store_true', help='fit the variance of the policy')
parser.add_argument("--init_logstd", type=float, default=-1, help='initial policy variance')
parser.add_argument('--save_path', type=str, default='./data_starcraft', help='path to save the model')
args = parser.parse_args()
num_clusters = args.num_clusters
n_experiments = args.n_experiments
results = []
n_agents = 1
# where the demonstrations are
demonstrations = 'data_starcraft/'
agent_to_data = [str(i) for i in range(100)] # change to 100
num_objectives = 2
states_data = np.load(demonstrations + 'states_TerranVsTerran_100_150_[16:26].pkl', allow_pickle=True)
actions_data = np.load(demonstrations + 'actions_TerranVsTerran_100_150_3.pkl', allow_pickle=True)
reward_data = np.load(demonstrations + 'rewards_mm_TerranVsTerran_100_150_[ 20 21 -22].pkl', allow_pickle=True)
features_idx = [0, 1] #, 2]
GAMMA = args.gamma
for exp in range(n_experiments):
print("Experiment %s" % (exp+1))
estimated_gradients_all = []
for agent_name in agent_to_data:
X_dataset = states_data[agent_name]
y_dataset = actions_data[agent_name]
r_dataset = reward_data[agent_name]
X_dim = len(X_dataset[0])
y_dim = 3 # number of actions
# Create Policy
model = 'bc/models/' + agent_name + '/12500_2_1605425506.850805/best'
# '/10000_2_1605412033.7539003/best' 20
linear = 'gpomdp' in model
print('load policy..')
policy_train = load_policy(X_dim=X_dim, model=model, continuous=False, num_actions=y_dim,
n_bases=X_dim,
trainable_variance=args.trainable_variance, init_logstd=args.init_logstd,
linear=linear, num_hidden=args.num_hidden, num_layers=args.num_layers)
print('Loading dataset... done')
# compute gradient estimation
estimated_gradients, _ = compute_gradient(policy_train, X_dataset, y_dataset, r_dataset, None,
len(X_dataset), GAMMA, features_idx,
verbose=args.verbose,
use_baseline=args.baseline,
use_mask=args.mask,
scale_features=args.scale_features,
filter_gradients=args.filter_gradients,
normalize_f=False)
estimated_gradients_all.append(estimated_gradients)
# ==================================================================================================================
if args.save_grad:
print("Saving gradients in ", args.save_path)
np.save(args.save_path + '/estimated_gradients.npy', estimated_gradients)
mus = []
sigmas = []
ids = []
#import pdb; pdb.set_trace()
for i, agent in enumerate(agent_to_data):
num_episodes, num_parameters, num_objectives = estimated_gradients_all[i].shape[:]
mu, sigma = estimate_distribution_params(estimated_gradients=estimated_gradients_all[i],
diag=False, identity=False, other_options=[False, True],
cov_estimation=False)
id_matrix = np.identity(num_parameters)
mus.append(mu)
sigmas.append(sigma)
ids.append(id_matrix)
#import pdb; pdb.set_trace()
P, Omega, loss = em_clustering(mus, sigmas, ids, num_clusters=num_clusters,
num_objectives=num_objectives,
optimization_iterations=1)
print(P)
print(Omega)
results.append((P, Omega, loss))
with open(args.save_path + '/results_mm_3.pkl', 'wb') as handle:
pickle.dump(results, handle)
| [
"[email protected]"
] | |
f672d86438ba0b5915fbeb66e0f1ce91c0d0bcac | e8274f167fd219ef78241ba8ea89e5d5875ed794 | /cloud/swift/build/scripts-2.7/swift-object-server | 228172009d40b2bdbe0fcd25681a2c823f476276 | [
"Apache-2.0"
] | permissive | virt2x/folsomCloud | 02db0147f7e0f2ab0375faf4f36ca08272084152 | e6fd612dd77f35a72739cf4d4750e9795c0fa508 | refs/heads/master | 2021-01-01T17:26:28.405651 | 2013-10-17T12:36:04 | 2013-10-17T12:36:04 | 13,647,787 | 0 | 1 | null | 2020-07-24T08:25:22 | 2013-10-17T12:10:24 | Python | UTF-8 | Python | false | false | 832 | #!/usr/bin/python
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift.common.utils import parse_options
from swift.common.wsgi import run_wsgi
if __name__ == '__main__':
conf_file, options = parse_options()
run_wsgi(conf_file, 'object-server', default_port=6000, **options)
| [
"[email protected]"
] | ||
0512ff88a682b67ec1f8250b02b119ceb3c2963a | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /linear_dynamical_systems/experiment_bih_all.py | 3d0f01496ca05375be340e9b77f4b8dd1ddf479f | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 13,126 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script for running experiments.
Example to run locally:
python bih.py --output_dir=bih_may21 --channel=both\
--hdim=3 --num_clusters=2
The outputs will show up in the given output_dir (bih_may21 in the example).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import logging
import os
from absl import app
from absl import flags
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab # pylint: disable=g-import-not-at-top
import numpy as np
import pandas as pd
import seaborn as sns
import six
import sklearn
from six.moves import xrange  # pylint: disable=redefined-builtin
# pylint: disable=g-bad-import-order
import arma
import clustering
import lds
FLAGS = flags.FLAGS
# Flags for IO and plotting.
flags.DEFINE_string('output_dir', None, 'Output filepath.')
flags.DEFINE_boolean(
'load_results', False, 'Whether to skip experiments '
'and only plot existing results from output_dir.')
flags.DEFINE_boolean(
'plot_clusters', False, 'Whether to visualize each '
'experiment run and plot clustering results.')
flags.DEFINE_integer('sample_size', None, 'Sample size of signals for each '
'clustering run.')
flags.DEFINE_boolean(
'filter_type', False, 'Whether to select only certain '
'types of labels according to prior work.')
flags.DEFINE_integer(
'label_count_threshold', 0, 'Threshold for label counts, '
'label as `other` if below the threshold.')
flags.DEFINE_integer('num_repeat', 1,
'Number of repeated runs for bootstrapping neg examples.')
flags.DEFINE_integer('subsample_step_size', 1, '1 for not subsampling')
flags.DEFINE_string('channel', 'both', 'Which channel to use, both or 0 or 1.')
# Flags for hparams in clustering algorithms.
flags.DEFINE_integer('hdim', 0, 'Hidden state dimension.')
flags.DEFINE_integer('num_clusters', 0, 'Desired number of clusters.')
flags.DEFINE_integer(
'LDS_GIBBS_num_update_samples', 100, 'Number of update '
'samples used for fitting LDS in pylds package.')
flags.DEFINE_integer('random_seed', 0, 'Random seed.')
# Flags for whether to include certain baselines.
flags.DEFINE_boolean(
'include_LDS_MLE', False, 'Whether to include MLE '
'estimation for LDS in the experiments. Could be slow.')
flags.DEFINE_boolean(
'include_tslearn', True, 'Whether to include time series '
'clustering methods from the tslearn package in the '
'experiments.')
flags.DEFINE_boolean(
'include_tslearn_slow', False, 'Whether to include time '
'series clustering methods from the tslearn package '
'that are slow: DTW and GAK.')
flags.DEFINE_boolean('include_LDS_GIBBS', True, 'Whether to include the '
'Gibbs sampling method for LDS.')
flags.DEFINE_boolean('include_ARMA_MLE', False, 'Whether to include the '
'MLE method for ARMA.')
def _drop_nan_rows(arr):
return arr[~np.isnan(arr).any(axis=1)]
def _replace_nan_with_0(arr):
return np.where(np.isnan(arr), 0.0, arr)
def create_model_fns(hdim):
"""Util function to create model fns to fit model params to sequences.
Args:
hdim: Guessed hidden dimension for model fitting.
Returns:
A dictionary mapping method names to model_fns. Each model_fn
takes output seq and input seq, and returns fitted model params.
"""
model_fns = collections.OrderedDict()
# Using raw outputs.
# model_fns['raw_output'] = lambda s: _replace_nan_with_0(s.outputs)
# pylint: disable=g-long-lambda
# Pure AR.
model_fns['AR'] = lambda s: arma.fit_ar(
_replace_nan_with_0(s.outputs), None, hdim)
# Iterated regression without regularization and with regularization.
model_fns['ARMA_OLS'] = lambda s: arma.fit_arma_iter(s.outputs, None, hdim)
model_fns['ARMA'] = lambda s: arma.fit_arma_iter(
s.outputs, None, hdim, l2_reg=0.01)
model_fns['ARMA_roots'] = lambda s: arma.get_eig_from_arparams(
arma.fit_arma_iter(s.outputs, None, hdim, l2_reg=0.01))
if FLAGS.include_LDS_GIBBS:
model_fns['LDS'] = lambda s: lds.fit_lds_gibbs(
_replace_nan_with_0(s.outputs),
None,
hdim,
num_update_samples=FLAGS.LDS_GIBBS_num_update_samples)
if FLAGS.include_ARMA_MLE:
model_fns['ARMA_MLE'] = lambda s: arma.fit_arma_mle(
_replace_nan_with_0(s.outputs), None, hdim)
if FLAGS.include_LDS_MLE:
model_fns['LDS_MLE'] = lambda s: lds.fit_lds_mle(
_replace_nan_with_0(s.outputs), None, hdim)
return model_fns
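# Usage sketch (assumes `seq` is a lds.LinearDynamicalSystemSequence):
#   model_fns = create_model_fns(hdim=3)
#   ar_params = model_fns['AR'](seq)  # fitted AR parameters for one sequence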
def parse_csv(filename, hdim):
"""Reads ECG data from csv file."""
labels = []
seqs = []
unprocessed_key = None
unprocessed_label = None
unprocessed_ch0 = None
not_full_length = 0
with open(filename, 'rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
key = row[0]
channel = row[1]
label = row[2]
channel_signal = np.array(row[3:]).reshape(-1, 1)
try:
channel_signal = channel_signal.astype(np.float32)
except ValueError:
channel_signal = np.array([float(x) if x else np.nan for x in row[3:]
]).reshape(-1, 1)
# logging.info('Partial signal of len %d with key %s',
# sum(~np.isnan(channel_signal)), key)
not_full_length += 1
if channel == '0':
assert unprocessed_ch0 is None
unprocessed_ch0 = channel_signal
unprocessed_key = key
unprocessed_label = label
if channel == '1':
assert unprocessed_ch0 is not None
seq_len = len(channel_signal)
assert len(unprocessed_ch0) == seq_len
if FLAGS.channel == 'both':
vals = np.concatenate([unprocessed_ch0, channel_signal], axis=1)
elif FLAGS.channel == '0':
vals = unprocessed_ch0
elif FLAGS.channel == '1':
vals = channel_signal
else:
raise ValueError('Unexpected FLAGS.channel value: %s' % FLAGS.channel)
seqs.append(
lds.LinearDynamicalSystemSequence(
np.zeros((seq_len, 1)), np.zeros((seq_len, hdim)), vals))
assert label == unprocessed_label
assert key.split(':')[:2] == unprocessed_key.split(':')[:2]
labels.append(label)
unprocessed_label = None
unprocessed_key = None
unprocessed_ch0 = None
logging.info('Total seqs: %d, partial length seqs: %d.', len(seqs),
not_full_length)
if FLAGS.filter_type:
seqs, labels = filter_type(seqs, labels)
seqs, labels = drop_infreq_labels(seqs, labels)
return seqs, labels
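# Illustrative row layout assumed by parse_csv (all values hypothetical):
#   key,        channel, label, v_1,  v_2,  ...
#   "100:0:42", "0",     "N",   0.12, 0.10, ...
#   "100:0:42", "1",     "N",   0.08, 0.07, ...
# Two consecutive rows (channel 0, then channel 1) describe one two-lead record.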
def _subsample_rows(arr, step_size):
return np.concatenate(
[arr[j:j + 1, :] for j in xrange(0, arr.shape[0], step_size)], axis=0)
def subsample(sequences, step_size=5):
subsampled = []
for s in sequences:
subsampled.append(
lds.LinearDynamicalSystemSequence(
_subsample_rows(s.inputs, step_size),
_subsample_rows(s.hidden_states, step_size),
_subsample_rows(s.outputs, step_size)))
return subsampled
def print_label_info(labels):
label_vocab, label_counts = np.unique(labels, return_counts=True)
df = pd.DataFrame(index=label_vocab, data={'count': label_counts})
print(df.sort_values('count', ascending=False).to_latex())
def filter_type(seqs, labels):
types = ['N', 'AFIB', 'VT', 'P', 'AFL']
seqs = [seqs[i] for i in xrange(len(seqs)) if labels[i] in types]
labels = [l for l in labels if l in types]
return seqs, labels
def drop_infreq_labels(seqs, labels):
"""Filter out infrequent labels."""
label_vocab, label_counts = np.unique(labels, return_counts=True)
is_dropped = {}
for i in xrange(len(label_vocab)):
logging.info('Found label %s, with count %d.', label_vocab[i],
label_counts[i])
if label_counts[i] < FLAGS.label_count_threshold:
logging.info('Dropped label %s.', label_vocab[i])
is_dropped[label_vocab[i]] = True
else:
is_dropped[label_vocab[i]] = False
seqs = [seqs[i] for i in xrange(len(seqs)) if not is_dropped[labels[i]]]
labels = [l for l in labels if not is_dropped[l]]
return seqs, labels
def sample_rebalance(seqs, labels):
"""Resample the data to have equal prevalence for each label."""
label_vocab = np.unique(labels)
n_samples_per_class = int(FLAGS.sample_size / len(label_vocab))
sampled_seqs = []
sampled_labels = []
for l in label_vocab:
l_seqs = [seqs[i] for i in xrange(len(seqs)) if labels[i] == l]
l_labels = [labels[i] for i in xrange(len(seqs)) if labels[i] == l]
sampled_l_seqs, sampled_l_labels = sklearn.utils.resample(
l_seqs, l_labels, n_samples=n_samples_per_class)
sampled_seqs.extend(sampled_l_seqs)
sampled_labels.extend(sampled_l_labels)
return sklearn.utils.shuffle(sampled_seqs, sampled_labels)
def get_results_bih_dataset(seqs, labels, hdim, num_clusters):
"""Returns a dataframe of clustering results on the ECG dataset."""
label_vocab, label_counts = np.unique(labels, return_counts=True)
logging.info('Counts of labels in current run: %s',
str(label_vocab) + ' ' + str(label_counts))
label_lookup = {l: i for i, l in enumerate(label_vocab)}
cluster_ids = [label_lookup[l] for l in labels]
model_fns = create_model_fns(hdim)
padded = clustering.pad_seqs_to_matrix(seqs)
max_seq_len = np.max([s.seq_len for s in seqs])
pca = sklearn.decomposition.PCA(n_components=hdim).fit(_drop_nan_rows(padded))
# pylint: disable=g-long-lambda
model_fns['PCA'] = lambda s: pca.transform(
_replace_nan_with_0(clustering.pad_seqs_to_matrix([s], max_seq_len))
).flatten()
# Get clustering results.
results_df = clustering.get_results(
seqs,
num_clusters,
cluster_ids,
None,
model_fns,
include_tslearn=FLAGS.include_tslearn,
include_slow_methods=FLAGS.include_tslearn_slow)
logging.info(results_df)
if FLAGS.plot_clusters:
clustering.visualize_clusters(
seqs, None, labels, model_fns,
os.path.join(FLAGS.output_dir, 'visualization.png'))
return results_df
def get_agg_stats(df):
"""Writes a csv file with aggregated stats."""
for metric in df.columns.values:
if metric == 'method':
continue
stats = df.groupby(['method'])[metric].agg(['mean', 'count', 'std'])
ci95_hi = []
ci95_lo = []
mean_w_ci = []
for i in stats.index:
m, c, s = stats.loc[i]
ci95_hi.append(m + 1.96 * s / np.sqrt(c))
ci95_lo.append(m - 1.96 * s / np.sqrt(c))
mean_w_ci.append(
'%.2f (%.2f-%.2f)' %
(m, m - 1.96 * s / np.sqrt(c), m + 1.96 * s / np.sqrt(c)))
stats['ci95_hi'] = ci95_hi
stats['ci95_lo'] = ci95_lo
stats['mean_w_ci'] = mean_w_ci
logging.info(metric)
logging.info(stats[['mean_w_ci']])
stats.to_csv(os.path.join(FLAGS.output_dir, metric + '_agg.csv'))
def plot_results(results_df, output_dir):
"""Plots metrics and saves plots as png files."""
for metric_name in results_df.columns:
if metric_name == 'seq_len' or metric_name == 'method':
continue
pylab.figure()
sns.lineplot(
x='seq_len',
y=metric_name,
data=results_df,
hue='method',
estimator=np.mean,
err_style='bars')
output = six.StringIO()
pylab.savefig(output, format='png')
image = output.getvalue()
with open(os.path.join(output_dir, metric_name + '.png'), 'w+') as f:
f.write(image)
def main(unused_argv):
np.random.seed(0)
if FLAGS.load_results:
with open(os.path.join(FLAGS.output_dir, 'results.csv'), 'r') as f:
results_df = pd.read_csv(f, index_col=False)
plot_results(results_df, FLAGS.output_dir)
return
if not os.path.exists(FLAGS.output_dir):
os.mkdir(FLAGS.output_dir)
combined_results_list = []
with open(os.path.join(FLAGS.output_dir, 'flags.txt'), 'w+') as f:
f.write(str(FLAGS.flag_values_dict()))
seqs, labels = parse_csv('mit-bih/all_classes.csv', FLAGS.hdim)
for _ in xrange(FLAGS.num_repeat):
seqs, labels = sample_rebalance(seqs, labels)
results_df = get_results_bih_dataset(seqs, labels, FLAGS.hdim,
FLAGS.num_clusters)
combined_results_list.append(results_df)
results_df = pd.concat(combined_results_list)
with open(os.path.join(FLAGS.output_dir, 'results.csv'), 'w+') as f:
results_df.to_csv(f, index=False)
get_agg_stats(results_df)
# plot_results(results_df, FLAGS.output_dir)
if __name__ == '__main__':
flags.mark_flag_as_required('output_dir')
app.run(main)
| [
"[email protected]"
] | |
28d757261b5d9e4a891e17ece4d57ba395c7bc10 | 76192480d7469e3d7f6ac8d8bbc3334445e5fddc | /splendor/schema/__init__.py | c6f3595b799b7af7e53f6708f7ec3f0db26acd82 | [] | no_license | forgeworks/splendor | b7d383a154bf72701a00d005f9aafbd3e90a6b30 | f99d66b76971f318637944a8ce5921367ee4aa21 | refs/heads/master | 2023-05-12T03:07:17.860147 | 2020-04-03T17:38:55 | 2020-04-03T17:38:55 | 155,748,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | from .native import *
from .fields import *
from .base import ConstraintFailure | [
"[email protected]"
] | |
74104b452e8cd41e68511e71935646368f97a602 | 17f3568e0be991636501970fb76c4c53a71ab38d | /opsgenie_sdk/api/alert/list_alert_notes_response_all_of.py | 99f419396432078c721f4f07e3574078810826d8 | [
"Apache-2.0"
] | permissive | jkinred/opsgenie-python-sdk | 7b79ed8c7518de117887e6b76a3fbb5800b94020 | 69bbd671d2257c6c3ab2f3f113cb62bd1a941c02 | refs/heads/master | 2020-07-10T00:24:19.583708 | 2019-08-24T06:35:31 | 2019-08-24T06:35:31 | 204,118,572 | 0 | 0 | NOASSERTION | 2019-08-24T06:29:25 | 2019-08-24T06:29:24 | null | UTF-8 | Python | false | false | 3,739 | py | # coding: utf-8
"""
Python SDK for Opsgenie REST API
Python SDK for Opsgenie REST API # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ListAlertNotesResponseAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'data': 'list[AlertNote]',
'paging': 'AlertPaging'
}
attribute_map = {
'data': 'data',
'paging': 'paging'
}
def __init__(self, data=None, paging=None): # noqa: E501
"""ListAlertNotesResponseAllOf - a model defined in OpenAPI""" # noqa: E501
self._data = None
self._paging = None
self.discriminator = None
if data is not None:
self.data = data
if paging is not None:
self.paging = paging
@property
def data(self):
"""Gets the data of this ListAlertNotesResponseAllOf. # noqa: E501
:return: The data of this ListAlertNotesResponseAllOf. # noqa: E501
:rtype: list[AlertNote]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this ListAlertNotesResponseAllOf.
:param data: The data of this ListAlertNotesResponseAllOf. # noqa: E501
:type: list[AlertNote]
"""
self._data = data
@property
def paging(self):
"""Gets the paging of this ListAlertNotesResponseAllOf. # noqa: E501
:return: The paging of this ListAlertNotesResponseAllOf. # noqa: E501
:rtype: AlertPaging
"""
return self._paging
@paging.setter
def paging(self, paging):
"""Sets the paging of this ListAlertNotesResponseAllOf.
:param paging: The paging of this ListAlertNotesResponseAllOf. # noqa: E501
:type: AlertPaging
"""
self._paging = paging
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListAlertNotesResponseAllOf):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
fc0d25830c60ada4c3c30ac76d6df747ce35bebe | 0cd0ffbdc849b265e8bbeb2369d6a320a21ec592 | /plugins/SettingsColorMapping.py | b86b8190d6d33665ef1eda5d4d48ac30147a1e2a | [] | no_license | ktskhai/vb25 | 7d0253d217e125036f35dd0d05fc05dbf9bc4800 | c81ba1506d12eab1a6b1536b5882aa9aa8589ae3 | refs/heads/master | 2021-01-23T01:01:11.833095 | 2013-12-03T15:01:02 | 2013-12-03T15:01:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,449 | py | '''
V-Ray/Blender
http://vray.cgdo.ru
Author: Andrey M. Izrantsev (aka bdancer)
E-Mail: [email protected]
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
All Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.
'''
''' Blender modules '''
import bpy
from bpy.props import *
''' vb modules '''
from vb25.utils import *
from vb25.ui.ui import *
TYPE = 'SETTINGS'
ID = 'SettingsColorMapping'
NAME = 'Color mapping'
DESC = "Color mapping options"
PARAMS = (
'type',
'affect_background',
'dark_mult',
'bright_mult',
'gamma',
'subpixel_mapping',
'clamp_output',
'clamp_level',
'adaptation_only',
'linearWorkflow',
)
def getColorMappingData(scene):
TYPE = {
'LNR' : 0,
'EXP' : 1,
'HSV' : 2,
'INT' : 3,
'GCOR' : 4,
'GINT' : 5,
'REIN' : 6,
}
VRayScene = scene.vray
SettingsColorMapping = VRayScene.SettingsColorMapping
cmData = "\nSettingsColorMapping ColorMapping {"
for param in PARAMS:
if param == 'type':
value = TYPE[SettingsColorMapping.type]
else:
value = getattr(SettingsColorMapping, param)
cmData += "\n\t%s= %s;" % (param, p(value))
cmData += "\n}\n"
return cmData
def updatePreviewColorMapping(self, context):
if bpy.context.scene.render.engine == 'VRAY_RENDER_PREVIEW':
open(getColorMappingFilepath(), 'w').write(getColorMappingData(context.scene))
def add_properties(rna_pointer):
class SettingsColorMapping(bpy.types.PropertyGroup):
pass
bpy.utils.register_class(SettingsColorMapping)
rna_pointer.SettingsColorMapping= PointerProperty(
name = "Color Mapping",
type = SettingsColorMapping,
description = "Color mapping settings"
)
SettingsColorMapping.type= EnumProperty(
name = "Type",
description = "Color mapping type",
items = (
('LNR',"Linear",""),
('EXP',"Exponential",""),
('HSV',"HSV exponential",""),
('INT',"Intensity exponential",""),
('GCOR',"Gamma correction",""),
('GINT',"Intensity gamma",""),
('REIN',"Reinhard","")
),
update = updatePreviewColorMapping,
default = "LNR"
)
SettingsColorMapping.affect_background= BoolProperty(
name= "Affect background",
description= "Affect colors belonging to the background",
update = updatePreviewColorMapping,
default= True
)
SettingsColorMapping.dark_mult= FloatProperty(
name= "Dark multiplier",
description= "Multiplier for dark colors",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 1.0,
update = updatePreviewColorMapping,
default= 1.0
)
SettingsColorMapping.bright_mult= FloatProperty(
name= "Bright multiplier",
description= "Multiplier for bright colors",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 1.0,
update = updatePreviewColorMapping,
default= 1.0
)
SettingsColorMapping.gamma= FloatProperty(
name= "Gamma",
description= "Gamma correction for the output image regardless of the color mapping mode",
min= 0.0,
max= 10.0,
soft_min= 1.0,
soft_max= 2.2,
update = updatePreviewColorMapping,
default= 1.0
)
SettingsColorMapping.input_gamma= FloatProperty(
name= "Input gamma",
description= "Input gamma for textures",
min= 0.0,
max= 10.0,
soft_min= 1.0,
soft_max= 2.2,
update = updatePreviewColorMapping,
default= 1.0
)
SettingsColorMapping.clamp_output= BoolProperty(
name= "Clamp output",
description= "Clamp colors after color mapping",
update = updatePreviewColorMapping,
default= True
)
SettingsColorMapping.clamp_level= FloatProperty(
name= "Clamp level",
description= "The level at which colors will be clamped",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 100.0,
update = updatePreviewColorMapping,
default= 1.0
)
SettingsColorMapping.subpixel_mapping= BoolProperty(
name= "Sub-pixel mapping",
description= "This option controls whether color mapping will be applied to the final image pixels, or to the individual sub-pixel samples",
update = updatePreviewColorMapping,
default= False
)
SettingsColorMapping.adaptation_only= BoolProperty(
name= "Adaptation only",
description= "When this parameter is on, the color mapping will not be applied to the final image, however V-Ray will proceed with all its calculations as though color mapping is applied (e.g. the noise levels will be corrected accordingly)",
update = updatePreviewColorMapping,
default= False
)
SettingsColorMapping.linearWorkflow= BoolProperty(
name= "Linear workflow",
description= "When this option is checked V-Ray will automatically apply the inverse of the Gamma correction that you have set in the Gamma field to all materials in scene",
update = updatePreviewColorMapping,
default= False
)
def write(bus):
if bus['preview']:
return
cmData = getColorMappingData(bus['scene'])
bus['files']['colorMapping'].write(cmData)
bus['files']['scene'].write(cmData)
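# Sketch of the chunk written above (values shown are illustrative defaults,
# not necessarily the exact formatting produced by p()):
#   SettingsColorMapping ColorMapping {
#       type= 0;
#       affect_background= 1;
#       dark_mult= 1.0;
#       ...
#   }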
| [
"[email protected]"
] | |
02f14f760f96ab9724a6dac403a19358ec93b6e9 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/nanachi_20200619190301.py | 32648dd56a93b5271d816872d3e65fa8b5ce3edd | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | import telebot
bot = telebot.TeleBot('776550937:AAELEr0c3H6dM-9QnlDD-0Q0Fcd65pPyAiM')
@bot.message_handler(content_types=['text'])
def send_text(message):
    # Reply only when the whole message consists of the letter "н".
    if message.text and check_all(message.text.lower(), "н"):
        bot.send_message(message.chat.id, message.text + message.text[1:])

def check_all(string, later):
    # Return True when every character of `string` equals `later`.
    for l in string:
        if l != later:
            return False
    return True

bot.polling() | [
"[email protected]"
] | |
201776c5e0e6919d311da86f24aec57b1984a584 | f1fd82d3d9d19f171c5ac83fef418f7584b1beba | /server.py | 59a5448d2019def2bbcf9a8baa932b4c0bb195f7 | [] | no_license | adinahhh/ratings | 5fc39ac6994f342485a52cf7200322632128d0c7 | 431b713343f14f2f98d63b4fbe4731777716bf74 | refs/heads/master | 2023-02-08T14:36:04.883882 | 2020-02-25T22:31:16 | 2020-02-25T22:31:16 | 242,199,940 | 0 | 0 | null | 2023-02-02T05:14:01 | 2020-02-21T17:59:07 | Python | UTF-8 | Python | false | false | 4,239 | py | """Movie Ratings."""
from jinja2 import StrictUndefined
from flask import (Flask, render_template, redirect, request, flash,
session)
from flask_debugtoolbar import DebugToolbarExtension
from model import User, Rating, Movie, connect_to_db, db
app = Flask(__name__)
# Required to use Flask sessions and the debug toolbar
app.secret_key = "ABC"
# Normally, if you use an undefined variable in Jinja2, it fails
# silently. This is horrible. Fix this so that, instead, it raises an
# error.
app.jinja_env.undefined = StrictUndefined
@app.route('/')
def index():
"""Homepage."""
return render_template("homepage.html")
@app.route('/users')
def user_list():
"""Show list of users. """
users = User.query.all()
return render_template("user_list.html", users=users)
@app.route('/registration', methods=['POST', 'GET'])
def registration():
"""Show user registration form or create user if email not in use."""
if request.method == 'POST':
email = request.form.get('email')
user_confirmed = User.query.filter(User.email == email).all()
if len(user_confirmed) == 0:
user = User(email=email, password=request.form.get('password'))
db.session.add(user)
db.session.commit()
flash('User successfully created')
else:
flash('User not created. Email associated with another user.')
return redirect('/')
return render_template('registration.html')
@app.route('/show_login')
def show_login():
"""Show login form."""
return render_template('login_form.html')
@app.route('/login', methods=['POST'])
def login():
"""Logs in existing user."""
email = request.form.get('email')
password = request.form.get('password')
existing_user = User.query.filter(User.email == email,
User.password == password).all()
if len(existing_user) > 0:
session['user_id'] = existing_user[0].user_id
flash('Logged in')
return redirect('/')
else:
flash('User does not exist. Please create an account.')
return redirect('/registration')
@app.route('/logout')
def logout():
""" log user out of session"""
flash('You are logged out.')
if session.get('user_id'):
del session['user_id']
return redirect('/')
@app.route('/users/<int:user_id>')
def user_details(user_id):
"""Show user details page"""
user = User.query.get(user_id)
return render_template("user_details.html", user=user)
@app.route('/movies')
def movie_list():
"""Show movie list."""
movies = Movie.query.order_by("title").all()
return render_template('movie_list.html', movies=movies)
@app.route('/movies/<int:movie_id>')
def movie_details(movie_id):
""" Show details about movie."""
movie = Movie.query.get(movie_id)
rating = None
if "user_id" in session:
user_id = session['user_id']
rating = Rating.query.filter_by(user_id=user_id,
movie_id=movie_id).first()
return render_template("movie_details.html", movie=movie, rating=rating)
@app.route('/add_rating/<int:movie_id>', methods=['POST'])
def update_rating(movie_id):
""" Add new rating, or update existing rating for existing users """
user_id = session['user_id']
score = request.form.get('score')
rating = Rating.query.filter_by(user_id=user_id, movie_id=movie_id).first()
if rating is None:
new_rating = Rating(score=score, movie_id=movie_id, user_id=user_id)
db.session.add(new_rating)
db.session.commit()
flash('Your score has been added!')
else:
rating.score = score
db.session.commit()
flash('Your score has been updated!')
return redirect('/movies')
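# Hypothetical request sketch (requires a logged-in session cookie):
#   curl -X POST -b "session=<cookie>" -d "score=4" \
#        http://localhost:5000/add_rating/1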
if __name__ == "__main__":
# We have to set debug=True here, since it has to be True at the
# point that we invoke the DebugToolbarExtension
app.debug = True
# make sure templates, etc. are not cached in debug mode
app.jinja_env.auto_reload = app.debug
connect_to_db(app)
# Use the DebugToolbar
DebugToolbarExtension(app)
app.run(port=5000, host='0.0.0.0')
| [
"[email protected]"
] | |
736f785df9def8088dea0aae9dabe82b16a9740c | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_10/models/file_system_clients_response.py | e1eec856f1c72463b8a3660b8bccb67ac5c2d070 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,213 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.10, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_10 import models
class FileSystemClientsResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[FileSystemClient]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.FileSystemClient]
):
"""
Keyword args:
items (list[FileSystemClient]): A list of file system clients.
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `FileSystemClientsResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FileSystemClientsResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FileSystemClientsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
1372a50bc975078801851f3c1bd1d16d11352f68 | 998cb658bc8843fecf53542478419b6de42c1102 | /backend/manage.py | 6edb4d358d82be7e7a53452864ae0452f5058a10 | [] | no_license | crowdbotics-apps/mobile-14-aug-dev-8991 | ab38c6c2ab28547087c22bd658698d8e89830c97 | de9ac59b415aac99a1705fffb5193026958f96c5 | refs/heads/master | 2023-07-02T04:15:13.913560 | 2020-08-14T12:13:39 | 2020-08-14T12:13:39 | 287,445,399 | 0 | 0 | null | 2021-08-03T20:02:00 | 2020-08-14T04:45:02 | JavaScript | UTF-8 | Python | false | false | 642 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mobile_14_aug_dev_8991.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
bf3db0cec63be8c811c677ef82ada20aa6592901 | 55d560fe6678a3edc9232ef14de8fafd7b7ece12 | /libs/python/test/data_members.py | 37bef0d7048313adce6da1d258338609b84bedc1 | [
"BSL-1.0"
] | permissive | stardog-union/boost | ec3abeeef1b45389228df031bf25b470d3d123c5 | caa4a540db892caa92e5346e0094c63dea51cbfb | refs/heads/stardog/develop | 2021-06-25T02:15:10.697006 | 2020-11-17T19:50:35 | 2020-11-17T19:50:35 | 148,681,713 | 0 | 0 | BSL-1.0 | 2020-11-17T19:50:36 | 2018-09-13T18:38:54 | C++ | UTF-8 | Python | false | false | 2,467 | py | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from data_members_ext import *
---- Test static data members ---
>>> v = Var('slim shady')
>>> Var.ro2a.x
0
>>> Var.ro2b.x
0
>>> Var.rw2a.x
0
>>> Var.rw2b.x
0
>>> v.ro2a.x
0
>>> v.ro2b.x
0
>>> v.rw2a.x
0
>>> v.rw2b.x
0
>>> Var.rw2a.x = 777
>>> Var.ro2a.x
777
>>> Var.ro2b.x
777
>>> Var.rw2a.x
777
>>> Var.rw2b.x
777
>>> v.ro2a.x
777
>>> v.ro2b.x
777
>>> v.rw2a.x
777
>>> v.rw2b.x
777
>>> Var.rw2b = Y(888)
>>> y = Y(99)
>>> y.q = True
>>> y.q
True
>>> y.q = False
>>> y.q
False
>>> Var.ro2a.x
888
>>> Var.ro2b.x
888
>>> Var.rw2a.x
888
>>> Var.rw2b.x
888
>>> v.ro2a.x
888
>>> v.ro2b.x
888
>>> v.rw2a.x
888
>>> v.rw2b.x
888
>>> v.rw2b.x = 999
>>> Var.ro2a.x
999
>>> Var.ro2b.x
999
>>> Var.rw2a.x
999
>>> Var.rw2b.x
999
>>> v.ro2a.x
999
>>> v.ro2b.x
999
>>> v.rw2a.x
999
>>> v.rw2b.x
999
>>> Var.ro1a
0
>>> Var.ro1b
0
>>> Var.rw1a
0
>>> Var.rw1b
0
>>> v.ro1a
0
>>> v.ro1b
0
>>> v.rw1a
0
>>> v.rw1b
0
>>> Var.rw1a = 777
>>> Var.ro1a
777
>>> Var.ro1b
777
>>> Var.rw1a
777
>>> Var.rw1b
777
>>> v.ro1a
777
>>> v.ro1b
777
>>> v.rw1a
777
>>> v.rw1b
777
>>> Var.rw1b = 888
>>> Var.ro1a
888
>>> Var.ro1b
888
>>> Var.rw1a
888
>>> Var.rw1b
888
>>> v.ro1a
888
>>> v.ro1b
888
>>> v.rw1a
888
>>> v.rw1b
888
>>> v.rw1b = 999
>>> Var.ro1a
999
>>> Var.ro1b
999
>>> Var.rw1a
999
>>> Var.rw1b
999
>>> v.ro1a
999
>>> v.ro1b
999
>>> v.rw1a
999
>>> v.rw1b
999
-----------------
>>> x = X(42)
>>> x.x
42
>>> try: x.x = 77
... except AttributeError: pass
... else: print('no error')
>>> x.fair_value
42.0
>>> y = Y(69)
>>> y.x
69
>>> y.x = 77
>>> y.x
77
>>> v = Var("pi")
>>> v.value = 3.14
>>> v.name
'pi'
>>> v.name2
'pi'
>>> v.get_name1()
'pi'
>>> v.get_name2()
'pi'
>>> v.y.x
6
>>> v.y.x = -7
>>> v.y.x
-7
>>> v.name3
'pi'
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print("running...")
import sys
status = run()[0]
if (status == 0): print("Done.")
sys.exit(status)
| [
"[email protected]"
] | |
c747a958e62fe8af848ebf95ee593021b8fc9fee | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/462/usersdata/308/105022/submittedfiles/avenida.py | d5a90601513ac9551b6535453d1dd15ef5a5326f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | # -*- coding: utf-8 -*-
def inteiro(texto, min, max):
    # Keep prompting until the value lies within [min, max].
    valor = int(input(texto))
    while valor < min or valor > max:
        valor = int(input(texto))
    return valor

m = inteiro('Enter the number of blocks in the North-South direction: ', 2, 1000)
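# With the corrected loop above, entering 1 re-prompts (outside [2, 1000]),
# while entering e.g. 20 is accepted and printed below.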
print(m) | [
"[email protected]"
] | |
839694ce63e2b101bc8a70244513e7ecd986f067 | df789505c99974c0ba45adc57e52fc7865ff2a28 | /class_system/src/services/admin_service.py | c21e20a9d7b6c4e87f806b41d0643eea93644496 | [] | no_license | zhiwenwei/python | 6fc231e47a9fbb555efa287ac121546e07b70f06 | 76d267e68f762ee9d7706e1800f160929544a0a3 | refs/heads/master | 2021-01-20T04:21:44.825752 | 2018-12-19T06:20:10 | 2018-12-19T06:20:10 | 89,676,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | #-*- coding:utf-8 -*-
#Author:Kevin
import sys,os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # add the project root to the import path
from models import School
def create_school():  # create a school
    # try:
    name = input("Enter the school name: ")
    addr = input("Enter the school address: ")
    school_name_list = [(obj.name,obj.addr) for obj in School.get_all_obj_list()]
    # if (name, addr) in school_name_list:
    #     raise Exception('\033[43;1mSchool [%s], campus [%s] already exists and cannot be created again\033[0m' % (name, addr))
    obj = School(name,addr)
    # print(school_name_list)
    obj.save()
    # status =True
    data = "School [%s], campus [%s] created successfully" % (obj.name, obj.addr)
print(data)
# except Exception as e:
# status = False
# error =str(e)
# data = ''
# return {'status': status, 'error': error, 'data': data}
create_school() | [
"[email protected]"
] | |
2206bfd7b874e66585e69e7e4f615ef67045f700 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4065/codes/1602_2894.py | 28a0e854a1d6a6c2f3a6d82537d62a75f1b0641b | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # Este código é apenas um ESBOÇO da solução.
# Modifique-o para atender as especificações do enunciado.
# Leitura das entradas e conversao para float:
var = float(input("Qual o valor unitario do jogo? "))
# Calculo do valor a ser pago, incluindo o frete:
total = float(var*8 + 45)
# Impressao do valor total:
print(total) | [
"[email protected]"
] | |
af3381ae78bf698dff8a7c97324d886c71b16a41 | ec827bd5df431c9400946e8d0593448814b5534b | /venv/bin/rst2latex.py | a52e3650ad536413c6bad8de9f042089b2ea2846 | [] | no_license | grantnicholas/pytone | 7acd70878de8090d06d7a2911a67b3dbb3b64256 | b89c688cc88588a3758fff288bc9b1364534b42e | refs/heads/master | 2021-01-23T06:19:47.203418 | 2014-09-21T21:52:27 | 2014-09-21T21:52:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | #!/home/grant/Desktop/pytone/venv/bin/python
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
| [
"[email protected]"
] | |
683053f40d2cf500cb405bf87ac2b8c2729e555a | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/gos_20200614062720.py | d6f85c522d6e82fc164a1c2ba47e9fea286c6ff5 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,421 | py | # # Імпорт фажливих бібліотек
# from BeautifulSoup import BeautifulSoup
# import urllib2
# import re
# # Створення функції пошуку силок
# def getLinks(url):
# # отримання та присвоєння контенту сторінки в змінну
# html_page = urllib2.urlopen(url)
# # Перетворення контенту в обєкт бібліотеки BeautifulSoup
# soup = BeautifulSoup(html_page)
# # створення пустого масиву для лінків
# links = []
# # ЗА ДОПОМОГОЮ ЧИКЛУ ПРОХЛДИМСЯ ПО ВСІХ ЕЛЕМЕНТАХ ДЕ Є СИЛКА
# for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
# # Додаємо всі силки в список
# links.append(link.get('href'))
# # повертаємо список
# return links
# -----------------------------------------------------------------------------------------------------------
# # # Import the required libraries
# import subprocess
# # Loop over range() to generate consecutive numbers
# for ping in range(1,10):
#     # Build an IP address from the iteration number
#     address = "127.0.0." + str(ping)
#     # Call ping against the address and store the exit code
#     res = subprocess.call(['ping', '-c', '3', address])
#     # Inspect the exit code with conditionals and print the outcome
#     if res == 0:
#         print "ping to", address, "OK"
#     elif res == 2:
#         print "no response from", address
#     else:
#         print "ping to", address, "failed!"
# -----------------------------------------------------------------------------------------------------------
# # Import the required libraries
# import requests
# # Iterate over the list of image URLs
# for i, pic_url in enumerate(["http://x.com/nanachi.jpg", "http://x.com/nezuko.jpg"]):
#     # Open a file named after the iteration number
#     with open('pic{0}.jpg'.format(i), 'wb') as handle:
#         # Fetch the image
#         response = requests.get(pic_url, stream=True)
#         # Check with a conditional whether the request succeeded
#         if not response.ok:
#             print(response)
#         # Iterate over the image bytes, writing them to the file in 1024-byte batches
#         for block in response.iter_content(1024):
#             # If the bytes have run out, stop
#             if not block:
#                 break
#             # Write the bytes to the file
#             handle.write(block)
# -----------------------------------------------------------------------------------------------------------
# # Create a class for a bank account
# class Bank_Account:
#     # In the constructor, initialise the balance to 0
#     def __init__(self):
#         self.balance=0
#         print("Hello!!! Welcome to the Deposit & Withdrawal Machine")
#     # In the deposit method, ask for the deposit amount via input() and add it to the balance
#     def deposit(self):
#         amount=float(input("Enter amount to be Deposited: "))
#         self.balance += amount
#         print("\n Amount Deposited:",amount)
#     # In the withdraw method, ask for the withdrawal amount via input() and subtract it from the balance
#     def withdraw(self):
#         amount = float(input("Enter amount to be Withdrawn: "))
#         # Check with a conditional whether the balance is sufficient
#         if self.balance>=amount:
#             self.balance-=amount
#             print("\n You Withdrew:", amount)
#         else:
#             print("\n Insufficient balance ")
#     # Display the balance on screen
#     def display(self):
#         print("\n Net Available Balance=",self.balance)
# # Create an account
# s = Bank_Account()
# # Perform operations on the account
# s.deposit()
# s.withdraw()
# s.display()
# -----------------------------------------------------------------------------------------------------------
# # Define a recursive function that takes a decimal number
# def decimalToBinary(n):
#     # Check whether the number is greater than 1
#     if(n > 1):
#         # If so, floor-divide it by 2 and call the function recursively
#         decimalToBinary(n//2)
#     # Then print the remainder of dividing the number by 2
#     print(n%2, end=' ')
# # Define a function that takes a binary number
# def binaryToDecimal(binary):
#     # Create a helper variable
#     binary1 = binary
#     # Initialise three more variables to 0
#     decimal, i, n = 0, 0, 0
#     # Iterate until the given number becomes 0
#     while(binary != 0):
#         # Take the remainder of dividing the number by 10 and store it
#         dec = binary % 10
#         # Add to the result the product of dec and 2 raised to the iteration number
#         decimal = decimal + dec * pow(2, i)
#         # Update binary
#         binary = binary//10
#         # Increment the iteration counter
#         i += 1
#     # Print the result
#     print(decimal)
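# # Example (sketch): decimalToBinary(10) prints "1 0 1 0",
# # and binaryToDecimal(1010) prints 10.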
# -----------------------------------------------------------------------------------------------------------
# # Import the required libraries
# import re
# # Check with a conditional whether the given e-mail matches a regex found on the internet
# if re.match(r"[^@]+@[^@]+\.[^@]+", "[email protected]"):
#     # If it does, print "valid"
#     print("valid")
# -----------------------------------------------------------------------------------------------------------
# # Create a function that takes the text to encrypt and a shift value
# def encrypt(text, s):
#     # Create a variable for the result
#     result = ""
#     # Iterate over the text using range and the text's length
#     for i in range(len(text)):
#         # Take the letter at the current iteration index
#         char = text[i]
#         # Check whether this letter is uppercase
#         if (char.isupper()):
#             # Encode the letter based on its character code
#             result += chr((ord(char) + s - 65) % 26 + 65)
#         else:
#             # Encode the letter based on its character code
#             result += chr((ord(char) + s - 97) % 26 + 97)
#     # Return the result
#     return result
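# # Example run (my own illustration): with a shift of 3 this is the classic
# # Caesar cipher. Note the snippet assumes purely alphabetic input; digits or
# # spaces would be shifted into the wrong character range.
# print(encrypt("ATTACKATDAWN", 3))  # prints: DWWDFNDWGDZQ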
# -----------------------------------------------------------------------------------------------------------
numbers = ["050234234", "050234234", "099234234"] | [
"[email protected]"
] | |
b13da817aede04b68ad39c188fb32a758e46b488 | 490957cf9130f1596c9f81bacff90b13f25eb2e6 | /Problems/Even numbers/task.py | 9cb7f6f386b84458325d9faeb5412c7818ca756b | [] | no_license | TonyNewbie/PaswordHacker | 6eb021e3660aba94d020a7b581dc2787b57556c0 | ac70d64cba58e83e88c00fb2f9c4fcc552efcc35 | refs/heads/master | 2022-11-19T03:29:53.300586 | 2020-07-13T10:37:34 | 2020-07-13T10:37:34 | 279,272,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | n = int(input())
def even():
i = 0
while True:
yield i
i += 2
# Don't forget to print out the first n numbers one by one here
new_generator = even()
for _ in range(n):
print(next(new_generator))
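# An equivalent way to take the first n values, sketched with the standard
# library (not part of the original solution):
# from itertools import islice
# print(*islice(even(), n), sep="\n")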
| [
"[email protected]"
] | |
da5440c299a0e972710e88c7311a48cc2e2cb085 | f031ed86f671bf1933bfce899162e8d9bb055f64 | /tf-w2v/word2vec_basic.py | d6b1405d5e060db3dddb62809f6b266ec29ad5d2 | [] | no_license | sushant3095/nlp | 148a6912c56c179822e4fe70464e801879405708 | baa39ac99d2d445e57b6ba79dfa62336868e1d94 | refs/heads/master | 2021-01-19T18:03:13.160594 | 2017-04-13T05:01:16 | 2017-04-13T05:01:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,782 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import datetime
import numpy as np
import time
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=4, skip_window=2)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
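# With num_skips=4 and skip_window=2 above, each center word is paired with all
# four surrounding context words, so this loop prints four (input -> label)
# pairs per center word; the exact words depend on the text8 corpus order.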
# Step 4: Build and train a skip-gram model.
batch_size = 1024
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.initialize_all_variables()
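  # Note: initialize_all_variables() is the pre-1.0 TensorFlow name for this
  # op; from TensorFlow 1.x onwards the same op is created with
  # tf.global_variables_initializer().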
# Step 5: Begin training.
num_steps = 100001
start_time = time.time()
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss, ", samples/s = %.4f" % (step*batch_size / (time.time() - start_time)))
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
seconds = time.time() - start_time
print("Done %d steps, duration %s, samples/s= %.4f" % (num_steps, datetime.timedelta(seconds=seconds), num_steps*batch_size / seconds))
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
  plt.figure(figsize=(18, 18))  # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i,:]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
| [
"[email protected]"
] | |
5178c6bc234c586a65edf654fd074b59e5be7adb | 40c677f1e39ba53063ced109f4bf23d16162a899 | /orders/views.py | b01d47e358988cc750df02d17479979112a55445 | [] | no_license | AminMohamedAmin/Online-Restaurant-System- | ee25b5d7ff7e52dc6b2ac632f0dd58e38022f6bb | b9aa2d8b8d69ab56437d4b4d039fc935b0b85227 | refs/heads/master | 2022-08-24T21:24:30.224785 | 2020-05-26T11:49:34 | 2020-05-26T11:49:34 | 267,028,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | from django.shortcuts import render, redirect,get_object_or_404
from django.urls import reverse
from .forms import OrderCreateForm
from .models import OrderItem, order
from cart.cart import Cart
############### pdf ####################
from django.contrib.admin.views.decorators import staff_member_required
from django.conf import settings
from django.http import HttpResponse
from django.template.loader import render_to_string
import weasyprint
#########################################
def order_create(request):
cart = Cart(request)
if request.method == 'POST':
form = OrderCreateForm(request.POST)
if form.is_valid():
order = form.save(commit=False)
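            # NB: this local name shadows the imported `order` model; that is
            # harmless here only because the model class itself is never used
            # inside this view.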
if cart.coupon:
order.coupon = cart.coupon
order.discount = cart.coupon.discount
order.save()
for item in cart:
OrderItem.objects.create(
order=order,
product=item['product'],
price=item['price'],
quantity=item['quantity'])
cart.clear()
context = {
'order':order,
}
return render(request,'order/created.html',context)
else:
form = OrderCreateForm()
context = {
'cart':cart,
'form':form
}
return render(request,'order/create.html',context)
####################### pdf #######################
@staff_member_required
def admin_order_pdf(request,order_id):
Order = get_object_or_404(order,id=order_id)
html = render_to_string('order/pdf.html',{'order':Order})
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename="order_{}.pdf"'.format(Order.id)
weasyprint.HTML(string=html).write_pdf(response,stylesheets=[weasyprint.CSS(settings.STATIC_ROOT + 'css/pdf.css')])
return response
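# A sketch of how these views might be wired up in the app's urls.py. The
# route paths and names below are assumptions for illustration, not taken
# from this project:
#
# from django.urls import path
# from . import views
#
# app_name = 'orders'
# urlpatterns = [
#     path('create/', views.order_create, name='order_create'),
#     path('admin/order/<int:order_id>/pdf/', views.admin_order_pdf, name='admin_order_pdf'),
# ]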
####################################################### | [
"[email protected]"
] | |
6e13b2cc1879d6fcbf5967e111777d18af637fa9 | 8a73cde463081afd76427d5af1e6837bfa51cc47 | /harvester/metadata/management/commands/compare_study_vocabularies.py | 65b82b39e3f1630c29dd6a3827f8bc7c7eecb52d | [
"MIT"
] | permissive | surfedushare/search-portal | 8af4103ec6464e255c5462c672b30f32cd70b4e1 | 63e30ad0399c193fcb686804062cedf3930a093c | refs/heads/acceptance | 2023-06-25T13:19:41.051801 | 2023-06-06T13:37:01 | 2023-06-06T13:37:01 | 254,373,874 | 2 | 1 | MIT | 2023-06-06T12:04:44 | 2020-04-09T13:07:12 | Python | UTF-8 | Python | false | false | 2,940 | py | import requests
import re
from django.core.management.base import BaseCommand
from metadata.models import MetadataValue
uuid4hex = re.compile(r'(?P<uuid>[0-9a-f]{8}\-[0-9a-f]{4}\-4[0-9a-f]{3}\-[89ab][0-9a-f]{3}\-[0-9a-f]{12})', re.I)
class Command(BaseCommand):
@staticmethod
def _get_node_label(node):
return node.get("skos:prefLabel", node.get("dcterms:title", {}))["@value"]
@staticmethod
def _get_node_id(node):
identifier_match = uuid4hex.search(node["@id"])
return identifier_match.group(0)
def _analyze_vocabulary_graph(self, vocabulary_path, graph):
table = {}
missing = set()
found = set()
for node in graph:
identifier = self._get_node_id(node)
table[identifier] = node
mptt_node = MetadataValue.objects.filter(value=identifier).last()
if mptt_node:
found.add(identifier)
continue
mptt_node = MetadataValue.objects.filter(translation__nl=self._get_node_label(node))
if mptt_node:
found.add(identifier)
else:
missing.add(identifier)
print("Graph analyze:", vocabulary_path)
print("found", len(found))
print("missing", len(missing))
print("*"*80)
def _substract_vocabulary_metadata(self, graph, ideas, studies):
for node in graph:
identifier = self._get_node_id(node)
label = self._get_node_label(node)
ideas.pop(identifier, None)
ideas.pop(label, None)
studies.pop(identifier, None)
studies.pop(label, None)
def handle(self, **options):
ideas = {
value.value: value
for value in MetadataValue.objects.filter(field__name="ideas.keyword")
}
studies = {
value.value: value
for value in MetadataValue.objects.filter(field__name="studies")
}
vocabularies = [
"verpleegkunde/verpleegkunde-2019.skos.json",
"informatievaardigheid/informatievaardigheid-2020.skos.json",
"vaktherapie/vaktherapie-2020.skos.json"
]
for vocabulary_path in vocabularies:
vocabulary_response = requests.get(f"https://vocabulaires.edurep.nl/type/vak/{vocabulary_path}")
vocabulary = vocabulary_response.json()
self._analyze_vocabulary_graph(vocabulary_path, vocabulary["@graph"])
self._substract_vocabulary_metadata(vocabulary["@graph"], ideas, studies)
print("Metadata analyze")
print(
"orphan ideas percentage",
int(len(ideas) / MetadataValue.objects.filter(field__name="ideas.keyword").count() * 100)
)
print(
"orphan studies percentage",
int(len(studies) / MetadataValue.objects.filter(field__name="studies").count() * 100)
)
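        # Typical invocation, assuming the standard manage.py layout of a
        # Django project:
        #   python manage.py compare_study_vocabularies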
| [
"[email protected]"
] | |
81986ebbff0325c513016a51c2583cc663f4f483 | 03d4f548b0f03d723c776a913c0814508052fbd4 | /src/tsgettoolbox/ulmo/util/__init__.py | 2b22dc7d4c5466883b30a8cf364eede652549a80 | [
"BSD-3-Clause"
] | permissive | timcera/tsgettoolbox | 2cee41cf79fd2a960d66066df5335bb1816f8003 | 1ca7e8c224a8f7c969aff1bbb22f13930cb8f8b0 | refs/heads/main | 2023-09-06T03:22:17.785382 | 2023-07-27T04:06:22 | 2023-07-27T04:06:22 | 40,149,564 | 14 | 4 | BSD-3-Clause | 2022-09-16T23:00:40 | 2015-08-03T21:47:57 | Python | UTF-8 | Python | false | false | 940 | py | from .misc import (
camel_to_underscore,
convert_date,
convert_datetime,
dict_from_dataframe,
dir_list,
download_if_new,
get_ulmo_dir,
mkdir_if_doesnt_exist,
module_with_dependency_errors,
module_with_deprecation_warnings,
open_file_for_url,
parse_fwf,
raise_dependency_error,
save_pretty_printed_xml,
to_bytes,
)
from .raster import (
download_tiles,
extract_from_zip,
generate_raster_uid,
mosaic_and_clip,
)
try:
from .pytables import (
get_default_h5file_path,
get_or_create_group,
get_or_create_table,
open_h5file,
update_or_append_sortable,
)
except ImportError:
get_default_h5file_path = raise_dependency_error
get_or_create_group = raise_dependency_error
get_or_create_table = raise_dependency_error
open_h5file = raise_dependency_error
update_or_append_sortable = raise_dependency_error
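# With the fallback above the module always imports cleanly; calling any
# pytables-backed helper without the optional dependency installed is expected
# to raise a descriptive dependency error instead of failing at import time.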
| [
"[email protected]"
] | |
489041c27386827df9ebe9a86ebd99213371c75d | 5b5a49643c75aa43d5a876608383bc825ae1e147 | /python99/misc/p702.py | 8888db3ce4a8bc0a289bf66437324404ec628a4c | [] | no_license | rscai/python99 | 281d00473c0dc977f58ba7511c5bcb6f38275771 | 3fa0cb7683ec8223259410fb6ea2967e3d0e6f61 | refs/heads/master | 2020-04-12T09:08:49.500799 | 2019-10-06T07:47:17 | 2019-10-06T07:47:17 | 162,393,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | def knight_tour(n):
return [[(1, 1)]+path for path in doTour(n, n*n-1, (1, 1), [(1, 1)])]
def doTour(n, m, start, path):
if m == 0:
return [[]]
availableMoves = getAvailableMoves(n, path, start)
return [[moveTo(start, move)]+remainPath
for move in availableMoves
for remainPath in doTour(n, m-1, moveTo(start, move), path+[moveTo(start, move)])]
def moveTo(start, move):
return (start[0]+move[0], start[1]+move[1])
def getAvailableMoves(n, path, start):
moveRules = [
(2, 1),
(1, 2),
(-1, 2),
(-2, 1),
(-2, -1),
(-1, -2),
(1, -2),
(2, -1)
]
for move in moveRules:
newPos = moveTo(start, move)
if newPos[0] > 0 and newPos[0] <= n and newPos[1] > 0 and newPos[1] <= n and newPos not in path:
yield move
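# Example (my own illustration): the trivial 1x1 board has exactly one tour,
# so knight_tour(1) == [[(1, 1)]]. For larger boards the full enumeration of
# open tours starting at (1, 1) grows very quickly.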
| [
"[email protected]"
] |